hexsha
stringlengths 40
40
| size
int64 2
1.05M
| content
stringlengths 2
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
33555e07fcd1a51c3582ff0ab0c1b2b6fc9e7676 | 13,108 | use crate::event::ValueKind;
use chrono::{DateTime, Local, ParseError as ChronoParseError, TimeZone, Utc};
use lazy_static::lazy_static;
use snafu::{ResultExt, Snafu};
use std::collections::{HashMap, HashSet};
use std::num::{ParseFloatError, ParseIntError};
use std::path::PathBuf;
use std::str::FromStr;
use string_cache::DefaultAtom as Atom;
lazy_static! {
pub static ref DEFAULT_CONFIG_PATHS: Vec<PathBuf> = vec!["/etc/vector/vector.toml".into()];
}
#[derive(Debug, Snafu)]
pub enum ConversionError {
    /// The requested conversion name matched no supported `Conversion` variant.
    #[snafu(display("Unknown conversion name {:?}", name))]
    UnknownConversion { name: String },
}
/// `Conversion` is a place-holder for a type conversion operation, to
/// convert from a plain (`Bytes`) `ValueKind` into another type. Every
/// variant of `ValueKind` is represented here.
#[derive(Clone)]
pub enum Conversion {
    /// Keep the raw bytes untouched (no conversion).
    Bytes,
    /// Parse the bytes as a signed 64-bit integer.
    Integer,
    /// Parse the bytes as a 64-bit float.
    Float,
    /// Parse the bytes as a boolean (see `parse_bool` for accepted spellings).
    Boolean,
    /// Parse the bytes as a timestamp, trying a battery of known formats.
    Timestamp,
    /// Parse with the given strftime-style format that carries no time
    /// zone; the result is interpreted in local time.
    TimestampFmt(String),
    /// Parse with the given strftime-style format that includes a time
    /// zone specifier.
    TimestampTZFmt(String),
}
impl FromStr for Conversion {
    type Err = ConversionError;
    /// Convert the string into a type conversion. The following
    /// conversion names are supported:
    ///
    ///  * `"asis"`, `"bytes"`, or `"string"` => As-is (no conversion)
    ///  * `"int"` or `"integer"` => Signed integer
    ///  * `"float"` => Floating point number
    ///  * `"bool"` or `"boolean"` => Boolean
    ///  * `"timestamp"` => Timestamp, guessed using a set of formats
    ///  * `"timestamp|FORMAT"` => Timestamp using the given format
    ///
    /// Timestamp parsing does not yet support time zones.
    ///
    /// # Errors
    ///
    /// Returns `ConversionError::UnknownConversion` for any other name.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "asis" | "bytes" | "string" => Ok(Conversion::Bytes),
            "integer" | "int" => Ok(Conversion::Integer),
            "float" => Ok(Conversion::Float),
            "bool" | "boolean" => Ok(Conversion::Boolean),
            "timestamp" => Ok(Conversion::Timestamp),
            // `strip_prefix` replaces the previous hard-coded `&s[10..]`
            // slice so the offset can never drift out of sync with the
            // prefix string.
            _ => match s.strip_prefix("timestamp|") {
                // DateTime<Utc> can only convert timestamps without
                // time zones, and DateTime<FixedOffset> can only
                // convert with time zones, so this has to distinguish
                // between the two kinds of format string.
                Some(fmt) if format_has_zone(fmt) => Ok(Conversion::TimestampTZFmt(fmt.into())),
                Some(fmt) => Ok(Conversion::TimestampFmt(fmt.into())),
                None => Err(ConversionError::UnknownConversion { name: s.into() }),
            },
        }
    }
}
/// Helper function to parse a conversion map and check against a list of names
///
/// Entries in `types` whose key is not present in `names` are not
/// fatal: each one only emits a warning, and parsing then proceeds
/// exactly as in `parse_conversion_map`.
pub fn parse_check_conversion_map(
    types: &HashMap<Atom, String>,
    names: &[Atom],
) -> Result<HashMap<Atom, Conversion>, ConversionError> {
    // Check if any named type references a nonexistent field
    let names: HashSet<Atom> = names.iter().map(|s| s.into()).collect();
    for name in types.keys() {
        if !names.contains(name) {
            warn!(
                message = "Field was specified in the types but is not a valid field name.",
                field = &name[..]
            );
        }
    }
    // Delegate the actual string-to-`Conversion` parsing.
    parse_conversion_map(types)
}
/// Helper function to parse a mapping of conversion descriptions into actual Conversion values.
///
/// Fails with the first `ConversionError` encountered; on success every
/// field name is mapped to its parsed `Conversion`.
pub fn parse_conversion_map(
    types: &HashMap<Atom, String>,
) -> Result<HashMap<Atom, Conversion>, ConversionError> {
    let mut conversions = HashMap::with_capacity(types.len());
    for (field, typename) in types {
        // `?` short-circuits on the first unparseable type name, just
        // like collecting into a `Result` would.
        let conversion = typename.parse::<Conversion>()?;
        conversions.insert(field.clone(), conversion);
    }
    Ok(conversions)
}
#[derive(Debug, Eq, PartialEq, Snafu)]
pub enum Error {
    /// Input matched none of the accepted boolean spellings.
    #[snafu(display("Invalid boolean value {:?}", s))]
    BoolParseError { s: String },
    /// `str::parse::<i64>` failed on the input.
    #[snafu(display("Invalid integer {:?}: {}", s, source))]
    IntParseError { s: String, source: ParseIntError },
    /// `str::parse::<f64>` failed on the input.
    #[snafu(display("Invalid floating point number {:?}: {}", s, source))]
    FloatParseError { s: String, source: ParseFloatError },
    /// Chrono rejected the input under the configured format string.
    #[snafu(display("Invalid timestamp {:?}: {}", s, source))]
    TimestampParseError { s: String, source: ChronoParseError },
    /// None of the automatic timestamp formats matched the input.
    #[snafu(display("No matching timestamp format found for {:?}", s))]
    AutoTimestampParseError { s: String },
}
impl Conversion {
    /// Use this `Conversion` variant to turn the given `value` into a
    /// new `ValueKind`. This will fail in unexpected ways if the
    /// `value` is not currently a `ValueKind::Bytes`.
    ///
    /// # Errors
    ///
    /// Returns the [`Error`] variant matching the target type when the
    /// raw bytes cannot be parsed as that type.
    pub fn convert(&self, value: ValueKind) -> Result<ValueKind, Error> {
        let bytes = value.as_bytes();
        Ok(match self {
            // As-is: hand the value back untouched.
            Conversion::Bytes => value,
            Conversion::Integer => {
                // Lossy decode: invalid UTF-8 becomes U+FFFD, which then
                // fails the numeric parse below rather than panicking.
                let s = String::from_utf8_lossy(&bytes);
                ValueKind::Integer(s.parse::<i64>().with_context(|| IntParseError { s })?)
            }
            Conversion::Float => {
                let s = String::from_utf8_lossy(&bytes);
                ValueKind::Float(s.parse::<f64>().with_context(|| FloatParseError { s })?)
            }
            Conversion::Boolean => {
                ValueKind::Boolean(parse_bool(&String::from_utf8_lossy(&bytes))?)
            }
            Conversion::Timestamp => {
                // Try the whole battery of automatic formats.
                ValueKind::Timestamp(parse_timestamp(&String::from_utf8_lossy(&bytes))?)
            }
            Conversion::TimestampFmt(format) => {
                // Zone-less format: interpret in local time, then
                // normalize to UTC.
                let s = String::from_utf8_lossy(&bytes);
                ValueKind::Timestamp(datetime_to_utc(
                    Local
                        .datetime_from_str(&s, &format)
                        .with_context(|| TimestampParseError { s })?,
                ))
            }
            Conversion::TimestampTZFmt(format) => {
                // Format carries its own zone; parse directly, then
                // convert the result to UTC.
                let s = String::from_utf8_lossy(&bytes);
                ValueKind::Timestamp(datetime_to_utc(
                    DateTime::parse_from_str(&s, &format)
                        .with_context(|| TimestampParseError { s })?,
                ))
            }
        })
    }
}
/// Parse a string into a native `bool`, accepting a wider vocabulary
/// than the built-in `bool::from_str` (which only recognizes `"true"`
/// and `"false"`). Accepted inputs:
///
/// * `"true"`, `"t"`, `"yes"`, `"y"` (any letter case) and any
///   non-zero integer parse to `true`.
///
/// * `"false"`, `"f"`, `"no"`, `"n"` (any letter case) and `"0"`
///   parse to `false`.
///
/// Anything else yields `Error::BoolParseError`.
fn parse_bool(s: &str) -> Result<bool, Error> {
    // Fast path: exact lowercase spellings, no allocation needed.
    match s {
        "true" | "t" | "yes" | "y" => return Ok(true),
        "false" | "f" | "no" | "n" | "0" => return Ok(false),
        _ => {}
    }
    // Numeric values: any parseable integer is truthy unless zero.
    if let Ok(n) = s.parse::<isize>() {
        return Ok(n != 0);
    }
    // Lowercase (a potentially expensive operation) only after the
    // cheap checks have failed.
    match s.to_lowercase().as_str() {
        "true" | "t" | "yes" | "y" => Ok(true),
        "false" | "f" | "no" | "n" => Ok(false),
        _ => Err(Error::BoolParseError { s: s.into() }),
    }
}
/// Does the format specifier have a time zone option?
///
/// Recognizes every strftime specifier treated here as carrying a time
/// zone: the zone name (`%Z`), the numeric offsets (`%z`, `%:z`,
/// `%#z`), and the full RFC 3339 form (`%+`).
fn format_has_zone(fmt: &str) -> bool {
    // A single table + `contains` replaces the previous chain of
    // `.find(..).is_some()` calls (clippy: `search_is_some`).
    ["%Z", "%z", "%:z", "%#z", "%+"]
        .iter()
        .any(|spec| fmt.contains(spec))
}
/// Convert a timestamp with a non-UTC time zone into UTC
///
/// Re-expresses the same instant in UTC without changing the underlying
/// moment in time.
fn datetime_to_utc<TZ: TimeZone>(ts: DateTime<TZ>) -> DateTime<Utc> {
    // `with_timezone` converts losslessly and, unlike rebuilding the
    // value from epoch seconds + subsecond nanos via `Utc.timestamp`,
    // has no panicking out-of-range path.
    ts.with_timezone(&Utc)
}
/// The list of allowed "automatic" timestamp formats
const TIMESTAMP_FORMATS: &[&str] = &[
"%F %T", // YYYY-MM-DD HH:MM:SS
"%v %T", // DD-Mmm-YYYY HH:MM:SS
"%FT%T", // ISO 8601 / RFC 3339 without TZ
"%m/%d/%Y:%T", // ???
"%a, %d %b %Y %T", // RFC 822/2822 without TZ
"%a %d %b %T %Y", // `date` command output without TZ
"%A %d %B %T %Y", // `date` command output without TZ, long names
"%a %b %e %T %Y", // ctime format
];
/// The list of allowed "automatic" timestamp formats for UTC
const TIMESTAMP_UTC_FORMATS: &[&str] = &[
"%s", // UNIX timestamp
"%FT%TZ", // ISO 8601 / RFC 3339 UTC
];
/// The list of allowed "automatic" timestamp formats with time zones
const TIMESTAMP_TZ_FORMATS: &[&str] = &[
"%+", // ISO 8601 / RFC 3339
"%a %d %b %T %Z %Y", // `date` command output
"%a %d %b %T %z %Y", // `date` command output, numeric TZ
"%a %d %b %T %#z %Y", // `date` command output, numeric TZ
];
/// Parse a string into a timestamp using one of a set of formats
///
/// Candidates are tried in order: local-time formats
/// (`TIMESTAMP_FORMATS`), UTC formats (`TIMESTAMP_UTC_FORMATS`),
/// RFC 3339, RFC 2822, and finally zone-carrying formats
/// (`TIMESTAMP_TZ_FORMATS`). The first successful parse wins.
///
/// # Errors
///
/// Returns `Error::AutoTimestampParseError` when no format matches.
pub fn parse_timestamp(s: &str) -> Result<DateTime<Utc>, Error> {
    // Zone-less formats are interpreted in the local time zone, then
    // normalized to UTC.
    for format in TIMESTAMP_FORMATS {
        if let Ok(result) = Local.datetime_from_str(s, format) {
            return Ok(datetime_to_utc(result));
        }
    }
    // Formats that are UTC by definition (e.g. a UNIX epoch value).
    for format in TIMESTAMP_UTC_FORMATS {
        if let Ok(result) = Utc.datetime_from_str(s, format) {
            return Ok(result);
        }
    }
    // Chrono's dedicated RFC parsers handle the common interchange forms.
    if let Ok(result) = DateTime::parse_from_rfc3339(s) {
        return Ok(datetime_to_utc(result));
    }
    if let Ok(result) = DateTime::parse_from_rfc2822(s) {
        return Ok(datetime_to_utc(result));
    }
    // Formats that embed their own explicit time zone.
    for format in TIMESTAMP_TZ_FORMATS {
        if let Ok(result) = DateTime::parse_from_str(s, format) {
            return Ok(datetime_to_utc(result));
        }
    }
    Err(Error::AutoTimestampParseError { s: s.into() })
}
#[cfg(test)]
mod tests {
use super::{parse_bool, parse_timestamp, Conversion, Error};
use crate::event::ValueKind;
use chrono::prelude::*;
const TIMEZONE: &str = "Australia/Brisbane";
fn dateref() -> DateTime<Utc> {
Utc.from_utc_datetime(&NaiveDateTime::from_timestamp(981173106, 0))
}
fn convert(fmt: &str, value: &str) -> Result<ValueKind, Error> {
std::env::set_var("TZ", TIMEZONE);
fmt.parse::<Conversion>()
.expect(&format!("Invalid conversion {:?}", fmt))
.convert(value.into())
}
#[cfg(unix)] // https://github.com/timberio/vector/issues/1201
#[test]
fn timestamp_conversion() {
assert_eq!(
convert("timestamp", "02/03/2001:14:05:06"),
Ok(dateref().into())
);
}
#[cfg(unix)] // see https://github.com/timberio/vector/issues/1201
#[test]
fn timestamp_param_conversion() {
assert_eq!(
convert("timestamp|%Y-%m-%d %H:%M:%S", "2001-02-03 14:05:06"),
Ok(dateref().into())
);
}
#[cfg(unix)] // see https://github.com/timberio/vector/issues/1201
#[test]
fn parse_timestamp_auto() {
std::env::set_var("TZ", TIMEZONE);
assert_eq!(parse_timestamp("2001-02-03 14:05:06"), Ok(dateref()));
assert_eq!(parse_timestamp("02/03/2001:14:05:06"), Ok(dateref()));
assert_eq!(parse_timestamp("2001-02-03T14:05:06"), Ok(dateref()));
assert_eq!(parse_timestamp("2001-02-03T04:05:06Z"), Ok(dateref()));
assert_eq!(parse_timestamp("Sat, 3 Feb 2001 14:05:06"), Ok(dateref()));
assert_eq!(parse_timestamp("Sat Feb 3 14:05:06 2001"), Ok(dateref()));
assert_eq!(parse_timestamp("3-Feb-2001 14:05:06"), Ok(dateref()));
assert_eq!(parse_timestamp("2001-02-02T22:05:06-06:00"), Ok(dateref()));
assert_eq!(
parse_timestamp("Sat, 03 Feb 2001 07:05:06 +0300"),
Ok(dateref())
);
}
// These should perhaps each go into an individual test function to be
// able to determine what part failed, but that would end up really
// spamming the test logs.
#[test]
fn parse_bool_true() {
assert_eq!(parse_bool("true"), Ok(true));
assert_eq!(parse_bool("True"), Ok(true));
assert_eq!(parse_bool("t"), Ok(true));
assert_eq!(parse_bool("T"), Ok(true));
assert_eq!(parse_bool("yes"), Ok(true));
assert_eq!(parse_bool("YES"), Ok(true));
assert_eq!(parse_bool("y"), Ok(true));
assert_eq!(parse_bool("Y"), Ok(true));
assert_eq!(parse_bool("1"), Ok(true));
assert_eq!(parse_bool("23456"), Ok(true));
assert_eq!(parse_bool("-8"), Ok(true));
}
#[test]
fn parse_bool_false() {
assert_eq!(parse_bool("false"), Ok(false));
assert_eq!(parse_bool("fAlSE"), Ok(false));
assert_eq!(parse_bool("f"), Ok(false));
assert_eq!(parse_bool("F"), Ok(false));
assert_eq!(parse_bool("no"), Ok(false));
assert_eq!(parse_bool("NO"), Ok(false));
assert_eq!(parse_bool("n"), Ok(false));
assert_eq!(parse_bool("N"), Ok(false));
assert_eq!(parse_bool("0"), Ok(false));
assert_eq!(parse_bool("000"), Ok(false));
}
#[test]
fn parse_bool_errors() {
assert!(parse_bool("X").is_err());
assert!(parse_bool("yes or no").is_err());
assert!(parse_bool("123.4").is_err());
}
}
| 37.028249 | 96 | 0.56538 |
1d271d1530a75823fc7c3da83ab75e8d0eee0473 | 778 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we check enum bounds for WFedness.
#![feature(associated_type_defaults)]
#![feature(rustc_attrs)]
#![allow(dead_code)]
trait ExtraCopy<T:Copy> { }
enum SomeEnum<T,U> //~ WARN E0277
where T: ExtraCopy<U>
{
SomeVariant(T,U)
}
#[rustc_error]
fn main() { } //~ ERROR compilation successful
| 28.814815 | 68 | 0.72108 |
28536ee977d49f770a58ac35dc88505537f90f00 | 4,152 | use crate::{errors::RustusError, RustusResult};
use actix_web::http::header::HeaderValue;
use digest::Digest;
/// Checks if hash-sum of a slice matches the given checksum.
///
/// Supported algorithm names are `sha1`, `sha256`, `sha512` and `md5`.
///
/// # Errors
///
/// Returns `RustusError::UnknownHashAlgorithm` for any other name.
fn checksum_verify(algo: &str, bytes: &[u8], checksum: &[u8]) -> RustusResult<bool> {
    // Select the digest in the match, compare in exactly one place,
    // instead of duplicating the comparison in every arm.
    let sum = match algo {
        "sha1" => sha1::Sha1::digest(bytes).to_vec(),
        "sha256" => sha2::Sha256::digest(bytes).to_vec(),
        "sha512" => sha2::Sha512::digest(bytes).to_vec(),
        "md5" => md5::Md5::digest(bytes).to_vec(),
        _ => return Err(RustusError::UnknownHashAlgorithm),
    };
    Ok(sum.as_slice() == checksum)
}
/// Verify checksum of a given chunk based on header's value.
///
/// The header is expected to look like:
/// `<algorithm name> <base64 encoded checksum value>`
///
/// The header value is decoded to text, split on spaces into the
/// algorithm name and the base64 checksum, and the decoded checksum is
/// then compared against the digest of `data`.
///
/// # Errors
///
/// Fails when the header value is not valid text, when it lacks either
/// part, when the base64 payload cannot be decoded, or when the
/// algorithm name is unknown.
pub fn verify_chunk_checksum(header: &HeaderValue, data: &[u8]) -> RustusResult<bool> {
    let val = header.to_str().map_err(|_| {
        log::error!("Can't decode checksum header.");
        RustusError::WrongHeaderValue
    })?;
    // First token is the algorithm, second the base64 checksum; any
    // further tokens are ignored, matching the previous behavior.
    let mut parts = val.split(' ');
    match (parts.next(), parts.next()) {
        (Some(algo), Some(checksum_base)) => {
            let checksum = base64::decode(checksum_base).map_err(|_| {
                log::error!("Can't decode checksum value");
                RustusError::WrongHeaderValue
            })?;
            checksum_verify(algo, data, checksum.as_slice())
        }
        _ => Err(RustusError::WrongHeaderValue),
    }
}
#[cfg(test)]
mod tests {
use super::{checksum_verify, verify_chunk_checksum};
use actix_web::http::header::HeaderValue;
#[test]
fn test_success_checksum_verify() {
let res = checksum_verify(
"sha1",
b"hello",
b"\xaa\xf4\xc6\x1d\xdc\xc5\xe8\xa2\xda\xbe\xde\x0f;H,\xd9\xae\xa9CM",
)
.unwrap();
assert!(res);
let res = checksum_verify(
"sha256",
b"hello",
b",\xf2M\xba_\xb0\xa3\x0e&\xe8;*\xc5\xb9\xe2\x9e\x1b\x16\x1e\\\x1f\xa7B^s\x043b\x93\x8b\x98$",
).unwrap();
assert!(res);
let res = checksum_verify(
"sha512",
b"hello",
b"\x9bq\xd2$\xbdb\xf3x]\x96\xd4j\xd3\xea=s1\x9b\xfb\xc2\x89\x0c\xaa\xda\xe2\xdf\xf7%\x19g<\xa7##\xc3\xd9\x9b\xa5\xc1\x1d|z\xccn\x14\xb8\xc5\xda\x0cFcG\\.\\:\xde\xf4os\xbc\xde\xc0C",
).unwrap();
assert!(res);
let res =
checksum_verify("md5", b"hello", b"]A@*\xbcK*v\xb9q\x9d\x91\x10\x17\xc5\x92").unwrap();
assert!(res);
}
#[test]
fn test_sum_unknown_algo_checksum_verify() {
let res = checksum_verify("base64", "test".as_bytes(), b"dGVzdAo=");
assert!(res.is_err());
}
#[test]
fn test_success_verify_chunk_checksum() {
let res = verify_chunk_checksum(
&HeaderValue::from_str("md5 XUFAKrxLKna5cZ2REBfFkg==").unwrap(),
b"hello",
)
.unwrap();
assert!(res);
}
#[test]
fn test_wrong_checksum() {
let res = verify_chunk_checksum(&HeaderValue::from_str("md5 memes==").unwrap(), b"hello");
assert!(res.is_err());
}
#[test]
fn test_bytes_header() {
let res = verify_chunk_checksum(
&HeaderValue::from_bytes(b"ewq ]A@*\xbcK*v").unwrap(),
b"hello",
);
assert!(res.is_err());
}
#[test]
fn test_badly_formatted_header() {
let res = verify_chunk_checksum(&HeaderValue::from_str("md5").unwrap(), b"hello");
assert!(res.is_err());
}
}
| 32.186047 | 193 | 0.56238 |
8aa580ad2cc0d505ab8e27e0f944422eddbb7b2a | 2,870 | #[doc = "Register `IER` writer"]
pub struct W(crate::W<IER_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<IER_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<IER_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<IER_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `DRDY` writer - Data Ready Interrupt Enable"]
pub struct DRDY_W<'a> {
w: &'a mut W,
}
impl<'a> DRDY_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2);
self.w
}
}
#[doc = "Field `OVR` writer - Overrun Interrupt Enable"]
pub struct OVR_W<'a> {
w: &'a mut W,
}
impl<'a> OVR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
self.w
}
}
impl W {
#[doc = "Bit 2 - Data Ready Interrupt Enable"]
#[inline(always)]
pub fn drdy(&mut self) -> DRDY_W {
DRDY_W { w: self }
}
#[doc = "Bit 3 - Overrun Interrupt Enable"]
#[inline(always)]
pub fn ovr(&mut self) -> OVR_W {
OVR_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Interrupt Enable Register\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ier](index.html) module"]
pub struct IER_SPEC;
impl crate::RegisterSpec for IER_SPEC {
type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [ier::W](W) writer structure"]
impl crate::Writable for IER_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets IER to value 0"]
impl crate::Resettable for IER_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| 28.7 | 331 | 0.562718 |
292e9361f3082472aa5f700510538207a0c94f2d | 5,016 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Suppose we have the following data structure in a smart contract:
//!
//! struct B {
//! Map<String, String> mymap;
//! }
//!
//! struct A {
//! B b;
//! int my_int;
//! }
//!
//! struct C {
//! List<int> mylist;
//! }
//!
//! A a;
//! C c;
//!
//! and the data belongs to Alice. Then an access to `a.b.mymap` would be translated to an access
//! to an entry in key-value store whose key is `<Alice>/a/b/mymap`. In the same way, the access to
//! `c.mylist` would need to query `<Alice>/c/mylist`.
//!
//! So an account stores its data in a directory structure, for example:
//! <Alice>/balance: 10
//! <Alice>/a/b/mymap: {"Bob" => "abcd", "Carol" => "efgh"}
//! <Alice>/a/myint: 20
//! <Alice>/c/mylist: [3, 5, 7, 9]
//!
//! If someone needs to query the map above and find out what value associated with "Bob" is,
//! `address` will be set to Alice and `path` will be set to "/a/b/mymap/Bob".
//!
//! On the other hand, if you want to query only <Alice>/a/*, `address` will be set to Alice and
//! `path` will be set to "/a" and use the `get_prefix()` method from statedb
use crate::account_address::AccountAddress;
use aptos_crypto::hash::HashValue;
use move_core_types::language_storage::{ModuleId, ResourceKey, StructTag, CODE_TAG, RESOURCE_TAG};
#[cfg(any(test, feature = "fuzzing"))]
use proptest_derive::Arbitrary;
use serde::{Deserialize, Serialize};
use std::{convert::TryFrom, fmt};
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, Ord, PartialOrd)]
#[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))]
pub struct AccessPath {
pub address: AccountAddress,
#[serde(with = "serde_bytes")]
pub path: Vec<u8>,
}
#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize, Ord, PartialOrd)]
pub enum Path {
Code(ModuleId),
Resource(StructTag),
}
impl AccessPath {
    /// Construct an access path from an account address and raw path bytes.
    pub fn new(address: AccountAddress, path: Vec<u8>) -> Self {
        AccessPath { address, path }
    }
    /// BCS-serialize a resource `StructTag` into raw path bytes.
    pub fn resource_access_vec(tag: StructTag) -> Vec<u8> {
        bcs::to_bytes(&Path::Resource(tag)).expect("Unexpected serialization error")
    }
    /// Convert Accesses into a byte offset which would be used by the storage layer to resolve
    /// where fields are stored.
    pub fn resource_access_path(key: ResourceKey) -> AccessPath {
        let path = AccessPath::resource_access_vec(key.type_);
        AccessPath {
            address: key.address,
            path,
        }
    }
    /// BCS-serialize a module id into raw path bytes.
    fn code_access_path_vec(key: ModuleId) -> Vec<u8> {
        bcs::to_bytes(&Path::Code(key)).expect("Unexpected serialization error")
    }
    /// Build the access path for a compiled module, stored under the
    /// module's own account address.
    pub fn code_access_path(key: ModuleId) -> AccessPath {
        let address = *key.address();
        let path = AccessPath::code_access_path_vec(key);
        AccessPath { address, path }
    }
    /// Extract the structured resource or module `Path` from `self`
    ///
    /// Panics (via `expect`) if `self.path` is not valid BCS for `Path`.
    pub fn get_path(&self) -> Path {
        bcs::from_bytes::<Path>(&self.path).expect("Unexpected serialization error")
    }
    /// Extract a StructTag from `self`. Returns Some if this is a resource access
    /// path and None otherwise
    pub fn get_struct_tag(&self) -> Option<StructTag> {
        match self.get_path() {
            Path::Resource(s) => Some(s),
            Path::Code(_) => None,
        }
    }
}
impl fmt::Debug for AccessPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"AccessPath {{ address: {:x}, path: {} }}",
self.address,
hex::encode(&self.path)
)
}
}
impl fmt::Display for AccessPath {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.path.len() < 1 + HashValue::LENGTH {
write!(f, "{:?}", self)
} else {
write!(f, "AccessPath {{ address: {:x}, ", self.address)?;
match self.path[0] {
RESOURCE_TAG => write!(f, "type: Resource, ")?,
CODE_TAG => write!(f, "type: Module, ")?,
tag => write!(f, "type: {:?}, ", tag)?,
};
write!(
f,
"hash: {:?}, ",
hex::encode(&self.path[1..=HashValue::LENGTH])
)?;
write!(
f,
"suffix: {:?} }} ",
String::from_utf8_lossy(&self.path[1 + HashValue::LENGTH..])
)
}
}
}
impl From<&ModuleId> for AccessPath {
fn from(id: &ModuleId) -> AccessPath {
AccessPath {
address: *id.address(),
path: id.access_vector(),
}
}
}
impl TryFrom<&[u8]> for Path {
type Error = bcs::Error;
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
bcs::from_bytes::<Path>(bytes)
}
}
impl TryFrom<&Vec<u8>> for Path {
type Error = bcs::Error;
fn try_from(bytes: &Vec<u8>) -> Result<Self, Self::Error> {
bcs::from_bytes::<Path>(bytes)
}
}
| 30.585366 | 99 | 0.576954 |
b9acf9a86971103589e07596faf5abaeaaac0be1 | 9,120 | #![allow(unused_imports, non_camel_case_types)]
use crate::models::r4::Element::Element;
use crate::models::r4::Extension::Extension;
use crate::models::r4::Reference::Reference;
use serde_json::json;
use serde_json::value::Value;
use std::borrow::Cow;
/// A set of healthcare-related information that is assembled together into a single
/// logical package that provides a single coherent statement of meaning, establishes
/// its own context and that has clinical attestation with regard to who is making the
/// statement. A Composition defines the structure and narrative content necessary for
/// a document. However, a Composition alone does not constitute a document. Rather,
/// the Composition must be the first entry in a Bundle where Bundle.type=document,
/// and any other resources referenced from Composition must be included as subsequent
/// entries in the Bundle (for example Patient, Practitioner, Encounter, etc.).
#[derive(Debug)]
pub struct Composition_Attester<'a> {
pub(crate) value: Cow<'a, Value>,
}
impl Composition_Attester<'_> {
pub fn new(value: &Value) -> Composition_Attester {
Composition_Attester {
value: Cow::Borrowed(value),
}
}
pub fn to_json(&self) -> Value {
(*self.value).clone()
}
/// Extensions for mode
pub fn _mode(&self) -> Option<Element> {
if let Some(val) = self.value.get("_mode") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// Extensions for time
pub fn _time(&self) -> Option<Element> {
if let Some(val) = self.value.get("_time") {
return Some(Element {
value: Cow::Borrowed(val),
});
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element. To make the use of extensions safe and manageable,
/// there is a strict set of governance applied to the definition and use of
/// extensions. Though any implementer can define an extension, there is a set of
/// requirements that SHALL be met as part of the definition of the extension.
pub fn extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("extension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Unique id for the element within a resource (for internal references). This may be
/// any string value that does not contain spaces.
pub fn id(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("id") {
return Some(string);
}
return None;
}
/// The type of attestation the authenticator offers.
pub fn mode(&self) -> Option<Composition_AttesterMode> {
if let Some(Value::String(val)) = self.value.get("mode") {
return Some(Composition_AttesterMode::from_string(&val).unwrap());
}
return None;
}
/// May be used to represent additional information that is not part of the basic
/// definition of the element and that modifies the understanding of the element
/// in which it is contained and/or the understanding of the containing element's
/// descendants. Usually modifier elements provide negation or qualification. To make
/// the use of extensions safe and manageable, there is a strict set of governance
/// applied to the definition and use of extensions. Though any implementer can define
/// an extension, there is a set of requirements that SHALL be met as part of the
/// definition of the extension. Applications processing a resource are required to
/// check for modifier extensions. Modifier extensions SHALL NOT change the meaning
/// of any elements on Resource or DomainResource (including cannot change the meaning
/// of modifierExtension itself).
pub fn modifier_extension(&self) -> Option<Vec<Extension>> {
if let Some(Value::Array(val)) = self.value.get("modifierExtension") {
return Some(
val.into_iter()
.map(|e| Extension {
value: Cow::Borrowed(e),
})
.collect::<Vec<_>>(),
);
}
return None;
}
/// Who attested the composition in the specified way.
pub fn party(&self) -> Option<Reference> {
if let Some(val) = self.value.get("party") {
return Some(Reference {
value: Cow::Borrowed(val),
});
}
return None;
}
/// When the composition was attested by the party.
pub fn time(&self) -> Option<&str> {
if let Some(Value::String(string)) = self.value.get("time") {
return Some(string);
}
return None;
}
pub fn validate(&self) -> bool {
if let Some(_val) = self._mode() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self._time() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.id() {}
if let Some(_val) = self.mode() {}
if let Some(_val) = self.modifier_extension() {
if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) {
return false;
}
}
if let Some(_val) = self.party() {
if !_val.validate() {
return false;
}
}
if let Some(_val) = self.time() {}
return true;
}
}
#[derive(Debug)]
pub struct Composition_AttesterBuilder {
pub(crate) value: Value,
}
impl Composition_AttesterBuilder {
pub fn build(&self) -> Composition_Attester {
Composition_Attester {
value: Cow::Owned(self.value.clone()),
}
}
pub fn with(existing: Composition_Attester) -> Composition_AttesterBuilder {
Composition_AttesterBuilder {
value: (*existing.value).clone(),
}
}
pub fn new() -> Composition_AttesterBuilder {
let mut __value: Value = json!({});
return Composition_AttesterBuilder { value: __value };
}
pub fn _mode<'a>(&'a mut self, val: Element) -> &'a mut Composition_AttesterBuilder {
self.value["_mode"] = json!(val.value);
return self;
}
pub fn _time<'a>(&'a mut self, val: Element) -> &'a mut Composition_AttesterBuilder {
self.value["_time"] = json!(val.value);
return self;
}
pub fn extension<'a>(&'a mut self, val: Vec<Extension>) -> &'a mut Composition_AttesterBuilder {
self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn id<'a>(&'a mut self, val: &str) -> &'a mut Composition_AttesterBuilder {
self.value["id"] = json!(val);
return self;
}
pub fn mode<'a>(
&'a mut self,
val: Composition_AttesterMode,
) -> &'a mut Composition_AttesterBuilder {
self.value["mode"] = json!(val.to_string());
return self;
}
pub fn modifier_extension<'a>(
&'a mut self,
val: Vec<Extension>,
) -> &'a mut Composition_AttesterBuilder {
self.value["modifierExtension"] =
json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>());
return self;
}
pub fn party<'a>(&'a mut self, val: Reference) -> &'a mut Composition_AttesterBuilder {
self.value["party"] = json!(val.value);
return self;
}
pub fn time<'a>(&'a mut self, val: &str) -> &'a mut Composition_AttesterBuilder {
self.value["time"] = json!(val);
return self;
}
}
#[derive(Debug)]
pub enum Composition_AttesterMode {
    Personal,
    Professional,
    Legal,
    Official,
}
impl Composition_AttesterMode {
    /// Parse a FHIR attestation-mode code; `None` for anything
    /// unrecognized.
    pub fn from_string(string: &str) -> Option<Composition_AttesterMode> {
        let mode = match string {
            "personal" => Self::Personal,
            "professional" => Self::Professional,
            "legal" => Self::Legal,
            "official" => Self::Official,
            _ => return None,
        };
        Some(mode)
    }
    /// Render the attestation mode back to its FHIR code.
    pub fn to_string(&self) -> String {
        let code = match self {
            Self::Personal => "personal",
            Self::Professional => "professional",
            Self::Legal => "legal",
            Self::Official => "official",
        };
        code.to_string()
    }
}
| 34.545455 | 100 | 0.588268 |
64ca36765fbd55e48d5fc4db2be6e235327cf407 | 419 | pub struct Bound {
element: f64,
contains: bool,
}
impl Bound {
    /// Create a bound from its endpoint and whether the endpoint is
    /// included in the interval.
    pub fn init(element: f64, contains: bool) -> Self {
        Self { element, contains }
    }
    /// Returns true when the textual form uses an inclusive bracket
    /// (`[` or `]`) anywhere in the string.
    pub fn is_contains_bound(contains: &str) -> bool {
        // char patterns instead of one-char string literals
        // (clippy: `single_char_pattern`).
        contains.contains('[') || contains.contains(']')
    }
    /// The numeric endpoint of the bound.
    pub fn element(&self) -> f64 {
        self.element
    }
    /// Whether the endpoint itself belongs to the interval.
    pub fn contains(&self) -> bool {
        self.contains
    }
}
| 19.045455 | 56 | 0.551313 |
de391b3e6f172e142a533929042ec4a41594837e | 3,811 | //! Contains the ingredients needed to create wrappers over tokio AsyncRead/AsyncWrite items
//! to automatically reconnect upon failures. This is done so that a user can use them without worrying
//! that their application logic will terminate simply due to an event like a temporary network failure.
//!
//! This crate will try to provide commonly used io items, for example, the [StubbornTcpStream](StubbornTcpStream).
//! If you need to create your own, you simply need to implement the [UnderlyingIo](crate::tokio::UnderlyingIo) trait.
//! Once implemented, you can construct it easily by creating a [StubbornIo](crate::tokio::StubbornIo) type as seen below.
//!
//! #### Compiler Warning
//! This crate only works on **nightly**, as it is dependent on async/await.
//! Once that is stabilized in Rust 1.38, it will work in regular Rust.
//!
//! ### Motivations
//! This crate was created because I was working on a service that needed to fetch data from a remote server
//! via a tokio TcpConnection. It normally worked perfectly (as does all of my code ☺), but every time the
//! remote server had a restart or turnaround, my application logic would stop working.
//! **stubborn-io** was born because I did not want to complicate my service's logic with TcpStream
//! reconnect and disconnect handling code. With stubborn-io, I can keep the service exactly the same,
//! knowing that the StubbornTcpStream's sensible defaults will perform reconnects in a way to keep my service running.
//! Once I realized that the implementation could apply to all IO items and not just TcpStream, I made it customizable as
//! seen below.
//!
//! ## Example on how a Stubborn IO item might be created
//! ``` ignore
//! use std::io;
//! use std::future::Future;
//! use std::path::PathBuf;
//! use std::pin::Pin;
//! use stubborn_io::tokio::{StubbornIo, UnderlyingIo};
//! use tokio::fs::File;
//!
//! impl UnderlyingIo<PathBuf> for File {
//! // Establishes an io connection.
//! // Additionally, this will be used when reconnect tries are attempted.
//! fn establish(path: PathBuf) -> Pin<Box<dyn Future<Output = io::Result<Self>> + Send>> {
//! Box::pin(async move {
//! // In this case, we are trying to "connect" a file that
//! // should exist on the system
//! Ok(File::open(path).await?)
//! })
//! }
//! }
//!
//! // Because StubbornIo implements deref, you are able to invoke
//! // the original methods on the File struct.
//! type HomemadeStubbornFile = StubbornIo<File, PathBuf>;
//! let path = PathBuf::from("./foo/bar.txt");
//!
//! let stubborn_file = HomemadeStubbornFile::connect(&path).await?;
//! // ... application logic here
//! ```
pub mod config;
// in the future, there may be a mod for synchronous regular io too, which is why
// tokio is specifically chosen to place the async stuff
pub mod tokio;
#[doc(inline)]
pub use self::config::ReconnectOptions;
#[doc(inline)]
pub use self::tokio::StubbornTcpStream;
// needed because the above doc example can't compile due to the fact that a consumer of this crate
// does not own the struct for tokio::fs::File.
#[test]
fn test_compilation_for_doc_example() {
    use self::tokio::{StubbornIo, UnderlyingIo};
    use ::tokio::fs::File;
    use std::future::Future;
    use std::io;
    use std::path::PathBuf;
    use std::pin::Pin;
    impl UnderlyingIo<PathBuf> for File {
        // Implementing the creation function that will be used to establish an io connection.
        fn establish(path: PathBuf) -> Pin<Box<dyn Future<Output = io::Result<Self>> + Send>> {
            Box::pin(async move { Ok(File::open(path).await?) })
        }
    }
    type HomemadeStubbornFile = StubbornIo<File, PathBuf>;
    // Never awaited: this test only needs the doc example to type-check.
    let _ = HomemadeStubbornFile::connect(PathBuf::from("foo"));
}
| 44.835294 | 122 | 0.69142 |
1dd644a0a1577ca04dd6fb45cc9674d48c58cd1c | 440 | //! # Peggy's Compiler
//!
//! This module contains the compiler, which turns Peggy grammars to syntax trees.
//!
//! These can then be used either with the [generators](`crate::generators`), or with the [built-in runtime](`crate::runtime`).
pub mod data;
mod errors;
mod parser;
mod report;
mod singles;
pub(crate) mod utils;
mod validator;
pub use data::*;
pub use errors::*;
pub use parser::*;
pub use report::*;
pub use validator::*;
| 22 | 127 | 0.695455 |
ffa6f03909b023a07eb3fabf491b41c96934b943 | 600 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pp-exact
fn from_foreign_fn(_x: fn()) { }
fn from_stack_closure(_x: ||) { }
fn from_unique_closure(_x: proc()) { }
fn main() { }
| 35.294118 | 68 | 0.718333 |
ddec715c73bf874c4708d3a58fa3eef2c2a14261 | 24,178 | // Copyright 2021 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod store_key;
use std::{
collections::BTreeSet,
convert::TryFrom,
path::{Path, PathBuf},
sync::Arc,
time::SystemTime,
};
use futures::{
stream::{self, Stream},
TryStreamExt,
};
use matrix_sdk_common::{
async_trait,
events::{
presence::PresenceEvent,
room::member::{MemberEventContent, MembershipState},
AnySyncStateEvent, EventContent, EventType,
},
identifiers::{RoomId, UserId},
};
use serde::{Deserialize, Serialize};
use sled::{
transaction::{ConflictableTransactionError, TransactionError},
Config, Db, Transactional, Tree,
};
use tracing::info;
use crate::deserialized_responses::MemberEvent;
use self::store_key::{EncryptedEvent, StoreKey};
use super::{Result, RoomInfo, StateChanges, StateStore, StoreError};
/// Marker persisted under the `store_key` entry describing whether the
/// database was created with at-rest encryption.
#[derive(Debug, Serialize, Deserialize)]
pub enum DatabaseType {
    /// Events are stored as plain JSON.
    Unencrypted,
    /// Events are encrypted with the contained (passphrase-protected) store key.
    Encrypted(store_key::EncryptedStoreKey),
}
/// Errors raised while (de)serializing events: plain JSON failures or
/// failures in the optional store-key encryption layer.
#[derive(Debug, thiserror::Error)]
pub enum SerializationError {
    #[error(transparent)]
    Json(#[from] serde_json::Error),
    #[error(transparent)]
    Encryption(#[from] store_key::Error),
}
impl From<TransactionError<SerializationError>> for StoreError {
    fn from(e: TransactionError<SerializationError>) -> Self {
        // A storage-layer failure maps straight to a sled error; an aborted
        // transaction carries one of our own serialization errors.
        match e {
            TransactionError::Storage(err) => StoreError::Sled(err),
            TransactionError::Abort(err) => err.into(),
        }
    }
}
impl From<SerializationError> for StoreError {
fn from(e: SerializationError) -> Self {
match e {
SerializationError::Json(e) => StoreError::Json(e),
SerializationError::Encryption(e) => match e {
store_key::Error::Random(e) => StoreError::Encryption(e.to_string()),
store_key::Error::Serialization(e) => StoreError::Json(e),
store_key::Error::Encryption(e) => StoreError::Encryption(e),
},
}
}
}
/// Types that can be encoded as a sled key. Key components are terminated by
/// a separator byte — presumably 0xff because it can never occur in valid
/// UTF-8 text (TODO confirm identifiers are guaranteed UTF-8).
trait EncodeKey {
    const SEPARATOR: u8 = 0xff;
    fn encode(&self) -> Vec<u8>;
}
// Ids delegate to the string encoding so ids and plain strings share one format.
impl EncodeKey for &UserId {
    fn encode(&self) -> Vec<u8> {
        self.as_str().encode()
    }
}
impl EncodeKey for &RoomId {
    fn encode(&self) -> Vec<u8> {
        self.as_str().encode()
    }
}
impl EncodeKey for &str {
    fn encode(&self) -> Vec<u8> {
        // Key is the raw bytes followed by the separator terminator.
        [self.as_bytes(), &[Self::SEPARATOR]].concat()
    }
}
impl EncodeKey for (&str, &str) {
    fn encode(&self) -> Vec<u8> {
        // Each component is written followed by the separator byte, matching
        // the single-string encoding applied twice.
        let mut buf = Vec::with_capacity(self.0.len() + self.1.len() + 2);
        buf.extend_from_slice(self.0.as_bytes());
        buf.push(Self::SEPARATOR);
        buf.extend_from_slice(self.1.as_bytes());
        buf.push(Self::SEPARATOR);
        buf
    }
}
impl EncodeKey for (&str, &str, &str) {
    fn encode(&self) -> Vec<u8> {
        // Concatenate each component's bytes, terminating every component
        // with the separator byte.
        [self.0, self.1, self.2]
            .iter()
            .flat_map(|part| {
                part.as_bytes()
                    .iter()
                    .copied()
                    .chain(std::iter::once(Self::SEPARATOR))
            })
            .collect()
    }
}
/// A sled-backed [`StateStore`] implementation; each logical table lives in
/// its own sled [`Tree`].
#[derive(Clone)]
pub struct SledStore {
    /// Filesystem location of the database; `None` for an in-memory store.
    path: Option<PathBuf>,
    pub(crate) inner: Db,
    /// Optional key used to encrypt events before they are written to disk.
    store_key: Arc<Option<StoreKey>>,
    session: Tree,
    account_data: Tree,
    members: Tree,
    profiles: Tree,
    display_names: Tree,
    joined_user_ids: Tree,
    invited_user_ids: Tree,
    room_info: Tree,
    room_state: Tree,
    room_account_data: Tree,
    stripped_room_info: Tree,
    stripped_room_state: Tree,
    stripped_members: Tree,
    presence: Tree,
}
impl std::fmt::Debug for SledStore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Report the on-disk location, or flag the store as in-memory.
        let mut dbg = f.debug_struct("SledStore");
        match &self.path {
            Some(path) => dbg.field("path", &path).finish(),
            None => dbg.field("path", &"memory store").finish(),
        }
    }
}
impl SledStore {
    /// Opens every named tree on `db` and assembles the store handle.
    ///
    /// `path` is `None` for in-memory databases; `store_key` enables
    /// at-rest encryption of serialized events when present.
    fn open_helper(db: Db, path: Option<PathBuf>, store_key: Option<StoreKey>) -> Result<Self> {
        let session = db.open_tree("session")?;
        let account_data = db.open_tree("account_data")?;
        let members = db.open_tree("members")?;
        let profiles = db.open_tree("profiles")?;
        let display_names = db.open_tree("display_names")?;
        let joined_user_ids = db.open_tree("joined_user_ids")?;
        let invited_user_ids = db.open_tree("invited_user_ids")?;
        let room_state = db.open_tree("room_state")?;
        let room_info = db.open_tree("room_infos")?;
        let presence = db.open_tree("presence")?;
        let room_account_data = db.open_tree("room_account_data")?;
        let stripped_room_info = db.open_tree("stripped_room_info")?;
        let stripped_members = db.open_tree("stripped_members")?;
        let stripped_room_state = db.open_tree("stripped_room_state")?;
        Ok(Self {
            path,
            inner: db,
            store_key: store_key.into(),
            session,
            account_data,
            members,
            profiles,
            display_names,
            joined_user_ids,
            invited_user_ids,
            room_account_data,
            presence,
            room_state,
            room_info,
            stripped_room_info,
            stripped_members,
            stripped_room_state,
        })
    }
    /// Opens a temporary, in-memory store (no persistence, no encryption).
    pub fn open() -> Result<Self> {
        let db = Config::new().temporary(true).open()?;
        SledStore::open_helper(db, None, None)
    }
    /// Opens (or creates) an encrypted store under `path`, unlocking the
    /// store key with `passphrase`.
    ///
    /// Fails with `StoreError::StoreLocked` when the passphrase cannot
    /// import the key, and with `StoreError::UnencryptedStore` when the
    /// database was originally created without encryption.
    pub fn open_with_passphrase(path: impl AsRef<Path>, passphrase: &str) -> Result<Self> {
        let path = path.as_ref().join("matrix-sdk-state");
        let db = Config::new().temporary(false).path(&path).open()?;
        let store_key: Option<DatabaseType> = db
            .get("store_key".encode())?
            .map(|k| serde_json::from_slice(&k).map_err(StoreError::Json))
            .transpose()?;
        let store_key = if let Some(key) = store_key {
            if let DatabaseType::Encrypted(k) = key {
                StoreKey::import(passphrase, k).map_err(|_| StoreError::StoreLocked)?
            } else {
                return Err(StoreError::UnencryptedStore);
            }
        } else {
            // First run: generate a fresh key and persist it encrypted under
            // the passphrase so later opens can recover it.
            let key = StoreKey::new().map_err::<StoreError, _>(|e| e.into())?;
            let encrypted_key = DatabaseType::Encrypted(
                key.export(passphrase)
                    .map_err::<StoreError, _>(|e| e.into())?,
            );
            db.insert("store_key".encode(), serde_json::to_vec(&encrypted_key)?)?;
            key
        };
        SledStore::open_helper(db, Some(path), Some(store_key))
    }
    /// Opens (or creates) an unencrypted store under `path`.
    pub fn open_with_path(path: impl AsRef<Path>) -> Result<Self> {
        let path = path.as_ref().join("matrix-sdk-state");
        let db = Config::new().temporary(false).path(&path).open()?;
        SledStore::open_helper(db, Some(path), None)
    }
    /// Serializes `event` to JSON bytes, encrypting first when a store key
    /// is configured.
    fn serialize_event(
        &self,
        event: &impl Serialize,
    ) -> std::result::Result<Vec<u8>, SerializationError> {
        if let Some(key) = &*self.store_key {
            let encrypted = key.encrypt(event)?;
            Ok(serde_json::to_vec(&encrypted)?)
        } else {
            Ok(serde_json::to_vec(event)?)
        }
    }
    /// Inverse of `serialize_event`: decrypts when a store key is
    /// configured, then deserializes from JSON.
    fn deserialize_event<T: for<'b> Deserialize<'b>>(
        &self,
        event: &[u8],
    ) -> std::result::Result<T, SerializationError> {
        if let Some(key) = &*self.store_key {
            let encrypted: EncryptedEvent = serde_json::from_slice(&event)?;
            Ok(key.decrypt(encrypted)?)
        } else {
            Ok(serde_json::from_slice(event)?)
        }
    }
    /// Stores a filter id under `filter_name` in the session tree.
    pub async fn save_filter(&self, filter_name: &str, filter_id: &str) -> Result<()> {
        self.session
            .insert(("filter", filter_name).encode(), filter_id)?;
        Ok(())
    }
    /// Looks up a previously saved filter id by name.
    pub async fn get_filter(&self, filter_name: &str) -> Result<Option<String>> {
        Ok(self
            .session
            .get(("filter", filter_name).encode())?
            .map(|f| String::from_utf8_lossy(&f).to_string()))
    }
    /// Returns the last sync token persisted by `save_changes`, if any.
    pub async fn get_sync_token(&self) -> Result<Option<String>> {
        Ok(self
            .session
            .get("sync_token".encode())?
            .map(|t| String::from_utf8_lossy(&t).to_string()))
    }
    /// Atomically persists a batch of `StateChanges` across all trees in a
    /// single sled transaction, then flushes the database to disk.
    pub async fn save_changes(&self, changes: &StateChanges) -> Result<()> {
        let now = SystemTime::now();
        let ret: std::result::Result<(), TransactionError<SerializationError>> = (
            &self.session,
            &self.account_data,
            &self.members,
            &self.profiles,
            &self.display_names,
            &self.joined_user_ids,
            &self.invited_user_ids,
            &self.room_info,
            &self.room_state,
            &self.room_account_data,
            &self.presence,
            &self.stripped_room_info,
            &self.stripped_members,
            &self.stripped_room_state,
        )
        .transaction(
            |(
                session,
                account_data,
                members,
                profiles,
                display_names,
                joined,
                invited,
                rooms,
                state,
                room_account_data,
                presence,
                striped_rooms,
                stripped_members,
                stripped_state,
            )| {
                if let Some(s) = &changes.sync_token {
                    session.insert("sync_token".encode(), s.as_str())?;
                }
                for (room, events) in &changes.members {
                    let profile_changes = changes.profiles.get(room);
                    for event in events.values() {
                        let key = (room.as_str(), event.state_key.as_str()).encode();
                        // Keep the joined/invited secondary indexes consistent
                        // with the new membership state.
                        match event.content.membership {
                            MembershipState::Join => {
                                joined.insert(key.as_slice(), event.state_key.as_str())?;
                                invited.remove(key.as_slice())?;
                            }
                            MembershipState::Invite => {
                                invited.insert(key.as_slice(), event.state_key.as_str())?;
                                joined.remove(key.as_slice())?;
                            }
                            _ => {
                                joined.remove(key.as_slice())?;
                                invited.remove(key.as_slice())?;
                            }
                        }
                        members.insert(
                            key.as_slice(),
                            self.serialize_event(&event)
                                .map_err(ConflictableTransactionError::Abort)?,
                        )?;
                        if let Some(profile) =
                            profile_changes.and_then(|p| p.get(&event.state_key))
                        {
                            profiles.insert(
                                key.as_slice(),
                                self.serialize_event(&profile)
                                    .map_err(ConflictableTransactionError::Abort)?,
                            )?;
                        }
                    }
                }
                for (room_id, ambiguity_maps) in &changes.ambiguity_maps {
                    for (display_name, map) in ambiguity_maps {
                        display_names.insert(
                            (room_id.as_str(), display_name.as_str()).encode(),
                            self.serialize_event(&map)
                                .map_err(ConflictableTransactionError::Abort)?,
                        )?;
                    }
                }
                for (event_type, event) in &changes.account_data {
                    account_data.insert(
                        event_type.as_str().encode(),
                        self.serialize_event(&event)
                            .map_err(ConflictableTransactionError::Abort)?,
                    )?;
                }
                for (room, events) in &changes.room_account_data {
                    for (event_type, event) in events {
                        room_account_data.insert(
                            (room.as_str(), event_type.as_str()).encode(),
                            self.serialize_event(&event)
                                .map_err(ConflictableTransactionError::Abort)?,
                        )?;
                    }
                }
                for (room, event_types) in &changes.state {
                    for events in event_types.values() {
                        for event in events.values() {
                            state.insert(
                                (
                                    room.as_str(),
                                    event.content().event_type(),
                                    event.state_key(),
                                )
                                    .encode(),
                                self.serialize_event(&event)
                                    .map_err(ConflictableTransactionError::Abort)?,
                            )?;
                        }
                    }
                }
                for (room_id, room_info) in &changes.room_infos {
                    rooms.insert(
                        room_id.encode(),
                        self.serialize_event(room_info)
                            .map_err(ConflictableTransactionError::Abort)?,
                    )?;
                }
                for (sender, event) in &changes.presence {
                    presence.insert(
                        sender.encode(),
                        self.serialize_event(&event)
                            .map_err(ConflictableTransactionError::Abort)?,
                    )?;
                }
                for (room_id, info) in &changes.invited_room_info {
                    striped_rooms.insert(
                        room_id.encode(),
                        self.serialize_event(&info)
                            .map_err(ConflictableTransactionError::Abort)?,
                    )?;
                }
                for (room, events) in &changes.stripped_members {
                    for event in events.values() {
                        stripped_members.insert(
                            (room.as_str(), event.state_key.as_str()).encode(),
                            self.serialize_event(&event)
                                .map_err(ConflictableTransactionError::Abort)?,
                        )?;
                    }
                }
                for (room, event_types) in &changes.stripped_state {
                    for events in event_types.values() {
                        for event in events.values() {
                            stripped_state.insert(
                                (
                                    room.as_str(),
                                    event.content().event_type(),
                                    event.state_key(),
                                )
                                    .encode(),
                                self.serialize_event(&event)
                                    .map_err(ConflictableTransactionError::Abort)?,
                            )?;
                        }
                    }
                }
                Ok(())
            },
        );
        ret?;
        self.inner.flush_async().await?;
        info!("Saved changes in {:?}", now.elapsed());
        Ok(())
    }
    /// Returns the stored presence event for `user_id`, if any.
    pub async fn get_presence_event(&self, user_id: &UserId) -> Result<Option<PresenceEvent>> {
        Ok(self
            .presence
            .get(user_id.encode())?
            .map(|e| self.deserialize_event(&e))
            .transpose()?)
    }
    /// Returns a single state event identified by room, type and state key.
    pub async fn get_state_event(
        &self,
        room_id: &RoomId,
        event_type: EventType,
        state_key: &str,
    ) -> Result<Option<AnySyncStateEvent>> {
        Ok(self
            .room_state
            .get((room_id.as_str(), event_type.to_string().as_str(), state_key).encode())?
            .map(|e| self.deserialize_event(&e))
            .transpose()?)
    }
    /// Returns the stored profile (member event content) for a user in a room.
    pub async fn get_profile(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
    ) -> Result<Option<MemberEventContent>> {
        Ok(self
            .profiles
            .get((room_id.as_str(), user_id.as_str()).encode())?
            .map(|p| self.deserialize_event(&p))
            .transpose()?)
    }
    /// Returns the stored member event for `state_key` in `room_id`.
    pub async fn get_member_event(
        &self,
        room_id: &RoomId,
        state_key: &UserId,
    ) -> Result<Option<MemberEvent>> {
        Ok(self
            .members
            .get((room_id.as_str(), state_key.as_str()).encode())?
            .map(|v| self.deserialize_event(&v))
            .transpose()?)
    }
    /// Streams the user ids of all known members of `room_id`.
    pub async fn get_user_ids(&self, room_id: &RoomId) -> impl Stream<Item = Result<UserId>> {
        stream::iter(self.members.scan_prefix(room_id.encode()).map(|u| {
            UserId::try_from(String::from_utf8_lossy(&u?.1).to_string())
                .map_err(StoreError::Identifier)
        }))
    }
    /// Streams the user ids of invited members of `room_id`.
    pub async fn get_invited_user_ids(
        &self,
        room_id: &RoomId,
    ) -> impl Stream<Item = Result<UserId>> {
        stream::iter(
            self.invited_user_ids
                .scan_prefix(room_id.encode())
                .map(|u| {
                    UserId::try_from(String::from_utf8_lossy(&u?.1).to_string())
                        .map_err(StoreError::Identifier)
                }),
        )
    }
    /// Streams the user ids of joined members of `room_id`.
    pub async fn get_joined_user_ids(
        &self,
        room_id: &RoomId,
    ) -> impl Stream<Item = Result<UserId>> {
        stream::iter(self.joined_user_ids.scan_prefix(room_id.encode()).map(|u| {
            UserId::try_from(String::from_utf8_lossy(&u?.1).to_string())
                .map_err(StoreError::Identifier)
        }))
    }
    /// Streams the `RoomInfo` of every known room.
    pub async fn get_room_infos(&self) -> impl Stream<Item = Result<RoomInfo>> {
        let db = self.clone();
        stream::iter(
            self.room_info
                .iter()
                .map(move |r| db.deserialize_event(&r?.1).map_err(|e| e.into())),
        )
    }
    /// Streams the `RoomInfo` of every room the user has been invited to.
    pub async fn get_stripped_room_infos(&self) -> impl Stream<Item = Result<RoomInfo>> {
        let db = self.clone();
        stream::iter(
            self.stripped_room_info
                .iter()
                .map(move |r| db.deserialize_event(&r?.1).map_err(|e| e.into())),
        )
    }
    /// Returns the set of users in `room_id` sharing the given display name
    /// (used for disambiguation); empty when no entry is stored.
    pub async fn get_users_with_display_name(
        &self,
        room_id: &RoomId,
        display_name: &str,
    ) -> Result<BTreeSet<UserId>> {
        let key = (room_id.as_str(), display_name).encode();
        Ok(self
            .display_names
            .get(key)?
            .map(|m| self.deserialize_event(&m))
            .transpose()?
            .unwrap_or_default())
    }
}
// Forwards the `StateStore` trait to the inherent implementations above;
// stream-returning getters are collected into `Vec`s here.
#[async_trait]
impl StateStore for SledStore {
    async fn save_filter(&self, filter_name: &str, filter_id: &str) -> Result<()> {
        self.save_filter(filter_name, filter_id).await
    }
    async fn save_changes(&self, changes: &StateChanges) -> Result<()> {
        self.save_changes(changes).await
    }
    async fn get_filter(&self, filter_id: &str) -> Result<Option<String>> {
        self.get_filter(filter_id).await
    }
    async fn get_sync_token(&self) -> Result<Option<String>> {
        self.get_sync_token().await
    }
    async fn get_presence_event(&self, user_id: &UserId) -> Result<Option<PresenceEvent>> {
        self.get_presence_event(user_id).await
    }
    async fn get_state_event(
        &self,
        room_id: &RoomId,
        event_type: EventType,
        state_key: &str,
    ) -> Result<Option<AnySyncStateEvent>> {
        self.get_state_event(room_id, event_type, state_key).await
    }
    async fn get_profile(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
    ) -> Result<Option<MemberEventContent>> {
        self.get_profile(room_id, user_id).await
    }
    async fn get_member_event(
        &self,
        room_id: &RoomId,
        state_key: &UserId,
    ) -> Result<Option<MemberEvent>> {
        self.get_member_event(room_id, state_key).await
    }
    async fn get_user_ids(&self, room_id: &RoomId) -> Result<Vec<UserId>> {
        self.get_user_ids(room_id).await.try_collect().await
    }
    async fn get_invited_user_ids(&self, room_id: &RoomId) -> Result<Vec<UserId>> {
        self.get_invited_user_ids(room_id).await.try_collect().await
    }
    async fn get_joined_user_ids(&self, room_id: &RoomId) -> Result<Vec<UserId>> {
        self.get_joined_user_ids(room_id).await.try_collect().await
    }
    async fn get_room_infos(&self) -> Result<Vec<RoomInfo>> {
        self.get_room_infos().await.try_collect().await
    }
    async fn get_stripped_room_infos(&self) -> Result<Vec<RoomInfo>> {
        self.get_stripped_room_infos().await.try_collect().await
    }
    async fn get_users_with_display_name(
        &self,
        room_id: &RoomId,
        display_name: &str,
    ) -> Result<BTreeSet<UserId>> {
        self.get_users_with_display_name(room_id, display_name)
            .await
    }
}
#[cfg(test)]
mod test {
    use std::{convert::TryFrom, time::SystemTime};
    use matrix_sdk_common::{
        events::{
            room::member::{MemberEventContent, MembershipState},
            Unsigned,
        },
        identifiers::{room_id, user_id, EventId, UserId},
    };
    use matrix_sdk_test::async_test;
    use super::{SledStore, StateChanges};
    use crate::deserialized_responses::MemberEvent;
    // Fixed test user; the function shadows the imported `user_id!` macro name.
    fn user_id() -> UserId {
        user_id!("@example:localhost")
    }
    // Minimal join event used as the membership fixture.
    fn membership_event() -> MemberEvent {
        let content = MemberEventContent {
            avatar_url: None,
            displayname: None,
            is_direct: None,
            third_party_invite: None,
            membership: MembershipState::Join,
        };
        MemberEvent {
            event_id: EventId::try_from("$h29iv0s8:example.com").unwrap(),
            content,
            sender: user_id(),
            origin_server_ts: SystemTime::now(),
            state_key: user_id(),
            prev_content: None,
            unsigned: Unsigned::default(),
        }
    }
    // Round-trips a member event through an in-memory store.
    #[async_test]
    async fn test_member_saving() {
        let store = SledStore::open().unwrap();
        let room_id = room_id!("!test:localhost");
        let user_id = user_id();
        assert!(store
            .get_member_event(&room_id, &user_id)
            .await
            .unwrap()
            .is_none());
        let mut changes = StateChanges::default();
        changes
            .members
            .entry(room_id.clone())
            .or_default()
            .insert(user_id.clone(), membership_event());
        store.save_changes(&changes).await.unwrap();
        assert!(store
            .get_member_event(&room_id, &user_id)
            .await
            .unwrap()
            .is_some());
    }
}
| 32.761518 | 96 | 0.499545 |
ef1b9ac33956be04c69963d3ec2ca83fb4562c0f | 660 | use crypto_hash::{self, Algorithm, Hasher};
use hex;
use std::fs::File;
use std::io;
use std::io::{BufReader, Read, Write};
use std::path::Path;
// Chunk size used when streaming a file through the hasher.
const BUFFER_SIZE: usize = 4096;
// Digest algorithm for every hash produced by this module.
const HASH_ALGORITHM: Algorithm = Algorithm::SHA256;
/// Computes the SHA-256 digest of the file at `path`, returned as a
/// lowercase hex string.
///
/// # Errors
/// Returns any I/O error produced while opening or reading the file, or
/// while feeding the hasher.
pub fn file<P>(path: P) -> io::Result<String>
where
    P: AsRef<Path>,
{
    let mut buf = [0; BUFFER_SIZE];
    let mut hasher = Hasher::new(HASH_ALGORITHM);
    let mut reader = BufReader::with_capacity(BUFFER_SIZE * 8, File::open(&path)?);
    loop {
        let count = reader.read(&mut buf)?;
        if count == 0 {
            break;
        }
        // `write_all` guarantees every byte reaches the hasher; the previous
        // bare `write` could perform a short write and silently drop input
        // from the digest.
        hasher.write_all(&buf[..count])?;
    }
    Ok(hex::encode(hasher.finish()))
}
| 23.571429 | 83 | 0.610606 |
5dc91dd52143c4840b178be4932df8af57cf7e38 | 2,303 | use serde::Deserialize;
use crate::repository::Error;
/// One platform-specific image entry inside a tag, as returned by the
/// Docker Hub v2 tag-listing API.
#[derive(Deserialize, Debug, Clone)]
struct ImageDetails {
    architecture: String,
    os: String,
    variant: Option<String>,
    // Image size in bytes (as reported by the API).
    size: usize,
}
/// A single tag from the Docker Hub tag listing, possibly covering several
/// platform images.
#[derive(Deserialize, Clone)]
pub struct Images {
    images: Vec<ImageDetails>,
    // The API field is called `name`; it holds the tag string.
    #[serde(rename(deserialize = "name"))]
    tag_name: String,
    last_updated: String,
}
impl Images {
    /// Translates this API payload into the repository-agnostic `Tag` model.
    pub fn convert(&self) -> super::Tag {
        let details = self
            .images
            .iter()
            .map(|img| {
                // Architecture and variant are concatenated, e.g. "arm" + "v7".
                let variant = img.variant.clone().unwrap_or_default();
                super::TagDetails {
                    arch: Some(format!("{}{}", img.architecture, variant)),
                    os: Some(img.os.clone()),
                    size: Some(img.size),
                }
            })
            .collect();
        super::Tag {
            name: self.tag_name.clone(),
            last_updated: Some(self.last_updated.clone()),
            details,
        }
    }
}
/// Top-level response of the Docker Hub tag-listing endpoint.
#[derive(Deserialize)]
pub struct DockerHub {
    // URL of the next results page when the listing is paginated.
    #[serde(rename(deserialize = "next"))]
    next_page: Option<String>,
    results: Vec<Images>,
}
impl DockerHub {
/// fetches tag information with a repository name in the form of organization/repository or library/repository in the case of official images from docker
pub fn create_repo(repo: &str) -> Result<super::Repo, Error> {
let request = format!("https://hub.docker.com/v2/repositories/{}/tags", repo);
Self::with_url(&request)
}
/// fetches tag information from a url
pub fn with_url(url: &str) -> Result<super::Repo, Error> {
let response = match reqwest::blocking::get(url) {
Ok(result) => result,
Err(e) => return Err(Error::Fetching(format!("reqwest error: {}", e))),
};
//convert it to json
let tags = match response.json::<Self>() {
Ok(result) => result,
Err(e) => return Err(Error::Converting(format!("invalid json: {}", e))),
};
if tags.results.is_empty() {
return Err(Error::NoTagsFound);
}
Ok(super::Repo {
tags: tags.results.iter().map(|t| t.convert()).collect(),
next_page: tags.next_page,
})
}
}
| 28.7875 | 158 | 0.534954 |
ed9b99188bb9032deab557d0749b396a5b85c738 | 8,505 | #![allow(non_snake_case)]
pub use self::AtomicRmwBinOp::*;
pub use self::CallConv::*;
pub use self::CodeGenOptSize::*;
pub use self::IntPredicate::*;
pub use self::Linkage::*;
pub use self::MetadataType::*;
pub use self::RealPredicate::*;
use libc::c_uint;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_llvm::RustString;
use std::cell::RefCell;
use std::ffi::{CStr, CString};
use std::str::FromStr;
use std::string::FromUtf8Error;
pub mod archive_ro;
pub mod diagnostic;
mod ffi;
pub use self::ffi::*;
impl LLVMRustResult {
    /// Converts the FFI status code into a standard `Result`.
    pub fn into_result(self) -> Result<(), ()> {
        match self {
            LLVMRustResult::Success => Ok(()),
            LLVMRustResult::Failure => Err(()),
        }
    }
}
/// Attaches a string attribute with an associated value to `llfn` at `idx`.
pub fn AddFunctionAttrStringValue(llfn: &'a Value, idx: AttributePlace, attr: &CStr, value: &CStr) {
    unsafe {
        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), value.as_ptr())
    }
}
/// Attaches a valueless string attribute to `llfn`; a null value pointer
/// signals "no value" to the C++ side.
pub fn AddFunctionAttrString(llfn: &'a Value, idx: AttributePlace, attr: &CStr) {
    unsafe {
        LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), std::ptr::null())
    }
}
/// Where an attribute is attached: the return value, one parameter, or the
/// function itself.
#[derive(Copy, Clone)]
pub enum AttributePlace {
    ReturnValue,
    Argument(u32),
    Function,
}
impl AttributePlace {
    /// Maps the place to the attribute-index convention used by the FFI:
    /// 0 for the return value, 1..=N for parameters, !0 (all bits set)
    /// for the function itself.
    pub fn as_uint(self) -> c_uint {
        match self {
            AttributePlace::ReturnValue => 0,
            AttributePlace::Argument(i) => 1 + i,
            AttributePlace::Function => !0,
        }
    }
}
/// Optimize-for-size levels; `repr(C)` with explicit discriminants because
/// the values are passed across the FFI boundary.
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptSize {
    CodeGenOptSizeNone = 0,
    CodeGenOptSizeDefault = 1,
    CodeGenOptSizeAggressive = 2,
}
impl FromStr for ArchiveKind {
    type Err = ();
    /// Parses an archive-format name into its `ArchiveKind`; unknown names
    /// yield `Err(())`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "gnu" => Ok(ArchiveKind::K_GNU),
            "bsd" => Ok(ArchiveKind::K_BSD),
            "darwin" => Ok(ArchiveKind::K_DARWIN),
            "coff" => Ok(ArchiveKind::K_COFF),
            _ => Err(()),
        }
    }
}
/// Sets the calling convention on a call/invoke instruction.
pub fn SetInstructionCallConv(instr: &'a Value, cc: CallConv) {
    unsafe {
        LLVMSetInstructionCallConv(instr, cc as c_uint);
    }
}
/// Sets the calling convention on a function definition.
pub fn SetFunctionCallConv(fn_: &'a Value, cc: CallConv) {
    unsafe {
        LLVMSetFunctionCallConv(fn_, cc as c_uint);
    }
}
// Externally visible symbols that might appear in multiple codegen units need to appear in
// their own comdat section so that the duplicates can be discarded at link time. This can for
// example happen for generics when using multiple codegen units. This function simply uses the
// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
// function.
// For more details on COMDAT sections see e.g., http://www.airs.com/blog/archives/52
pub fn SetUniqueComdat(llmod: &Module, val: &'a Value) {
    unsafe {
        let name = get_value_name(val);
        LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
    }
}
/// Removes any comdat assignment from `val`.
pub fn UnsetComdat(val: &'a Value) {
    unsafe {
        LLVMRustUnsetComdat(val);
    }
}
/// Sets the unnamed-address property of a global.
pub fn SetUnnamedAddress(global: &'a Value, unnamed: UnnamedAddr) {
    unsafe {
        LLVMSetUnnamedAddress(global, unnamed);
    }
}
/// Marks a global as thread-local (or clears the flag).
pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
    unsafe {
        LLVMSetThreadLocal(global, is_thread_local as Bool);
    }
}
/// Selects the thread-local storage mode for a thread-local global.
pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
    unsafe {
        LLVMSetThreadLocalMode(global, mode);
    }
}
impl Attribute {
    /// Adds this attribute to `llfn` at position `idx`.
    pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
    }
    /// Adds this attribute to a specific call site.
    pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
    }
    /// Removes this attribute from `llfn` at position `idx`.
    pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
    }
    /// Adds or removes this attribute depending on `set`.
    pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
        if set {
            self.apply_llfn(idx, llfn);
        } else {
            self.unapply_llfn(idx, llfn);
        }
    }
}
// Memory-managed interface to object files.
pub struct ObjectFile {
    pub llof: &'static mut ffi::ObjectFile,
}
// NOTE(review): soundness relies on LLVM's object-file API being usable from
// any thread while accessed through this exclusive reference — not
// verifiable from this file alone.
unsafe impl Send for ObjectFile {}
impl ObjectFile {
    // This will take ownership of llmb
    pub fn new(llmb: &'static mut MemoryBuffer) -> Option<ObjectFile> {
        unsafe {
            // Returns None when LLVM fails to parse the buffer as an object file.
            let llof = LLVMCreateObjectFile(llmb)?;
            Some(ObjectFile { llof })
        }
    }
}
impl Drop for ObjectFile {
    // Frees the underlying LLVM object file exactly once on drop.
    fn drop(&mut self) {
        unsafe {
            LLVMDisposeObjectFile(&mut *(self.llof as *mut _));
        }
    }
}
// Memory-managed interface to section iterators.
pub struct SectionIter<'a> {
    pub llsi: &'a mut SectionIterator<'a>,
}
impl Drop for SectionIter<'a> {
    // Releases the LLVM-side iterator when the wrapper goes away.
    fn drop(&mut self) {
        unsafe {
            LLVMDisposeSectionIterator(&mut *(self.llsi as *mut _));
        }
    }
}
/// Creates a section iterator over `llof`; disposal is handled by
/// `SectionIter`'s `Drop` impl.
pub fn mk_section_iter(llof: &ffi::ObjectFile) -> SectionIter<'_> {
    unsafe { SectionIter { llsi: LLVMGetSections(llof) } }
}
/// Places `llglobal` into the named object-file section.
pub fn set_section(llglobal: &Value, section_name: &str) {
    // CString::new only fails if the name contains an interior NUL byte.
    let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
    unsafe {
        LLVMSetSection(llglobal, section_name_cstr.as_ptr());
    }
}
/// Declares a new global of type `ty` named `name` in `llmod`.
pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
    let name_cstr = CString::new(name).expect("unexpected CString error");
    unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
}
/// Sets the initial value of a global.
pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
    unsafe {
        LLVMSetInitializer(llglobal, constant_val);
    }
}
/// Marks a global as constant (or clears the flag).
pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
    unsafe {
        LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
    }
}
/// Sets the linkage of a global value.
pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
    unsafe {
        LLVMRustSetLinkage(llglobal, linkage);
    }
}
/// Sets the alignment of a global, in bytes.
pub fn set_alignment(llglobal: &Value, bytes: usize) {
    unsafe {
        ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
    }
}
/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
///
/// # Panics
/// Panics if `index` is not less than the function's parameter count.
pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
    unsafe {
        assert!(
            index < LLVMCountParams(llfn),
            "out of bounds argument access: {} out of {} arguments",
            index,
            LLVMCountParams(llfn)
        );
        LLVMGetParam(llfn, index)
    }
}
/// Safe wrapper for `LLVMGetValueName2` into a byte slice
pub fn get_value_name(value: &Value) -> &[u8] {
    unsafe {
        let mut len = 0;
        // LLVM hands back a pointer/length pair; the slice borrows from `value`.
        let data = LLVMGetValueName2(value, &mut len);
        std::slice::from_raw_parts(data.cast(), len)
    }
}
/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub fn set_value_name(value: &Value, name: &[u8]) {
    unsafe {
        let data = name.as_ptr().cast();
        LLVMSetValueName2(value, data, name.len());
    }
}
/// Collects the bytes `f` writes into the provided `RustString` and returns
/// them as a `String`, failing on invalid UTF-8.
pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
    let sr = RustString { bytes: RefCell::new(Vec::new()) };
    f(&sr);
    String::from_utf8(sr.bytes.into_inner())
}
/// Like `build_string`, but returns the raw bytes with no UTF-8 check.
pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
    let sr = RustString { bytes: RefCell::new(Vec::new()) };
    f(&sr);
    sr.bytes.into_inner()
}
/// Renders an LLVM `Twine` to an owned `String`.
pub fn twine_to_string(tr: &Twine) -> String {
    unsafe {
        build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
    }
}
/// Returns LLVM's last error message, if one is set; the C string is freed
/// after being copied out.
pub fn last_error() -> Option<String> {
    unsafe {
        let cstr = LLVMRustGetLastError();
        if cstr.is_null() {
            None
        } else {
            let err = CStr::from_ptr(cstr).to_bytes();
            let err = String::from_utf8_lossy(err).to_string();
            // Ownership of the C string is transferred to us by the FFI call.
            libc::free(cstr as *mut _);
            Some(err)
        }
    }
}
/// Owned wrapper around an LLVM operand bundle; freed on drop.
pub struct OperandBundleDef<'a> {
    pub raw: &'a mut ffi::OperandBundleDef<'a>,
}
impl OperandBundleDef<'a> {
    /// Creates an operand bundle named `name` carrying the values in `vals`.
    pub fn new(name: &str, vals: &[&'a Value]) -> Self {
        let name = SmallCStr::new(name);
        let def = unsafe {
            LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
        };
        OperandBundleDef { raw: def }
    }
}
impl Drop for OperandBundleDef<'a> {
    fn drop(&mut self) {
        unsafe {
            LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
        }
    }
}
| 26.829653 | 100 | 0.623751 |
eddfd985d210f6b9e352afed7bf19d309155708b | 10,982 | #[macro_use]
extern crate failure;
use kube::client::APIClient;
use kubelet::pod::pod_status;
use kubelet::{pod::Pod, Phase, Provider, Status};
use log::{debug, info};
use std::collections::HashMap;
use wascc_host::{host, Actor, NativeCapability};
const ACTOR_PUBLIC_KEY: &str = "deislabs.io/wascc-action-key";
const TARGET_WASM32_WASCC: &str = "wasm32-wascc";
/// The name of the HTTP capability.
const HTTP_CAPABILITY: &str = "wascc:http_server";
#[cfg(target_os = "linux")]
const HTTP_LIB: &str = "./lib/libwascc_httpsrv.so";
#[cfg(target_os = "macos")]
const HTTP_LIB: &str = "./lib/libwascc_httpsrv.dylib";
/// Kubernetes' view of environment variables is an unordered map of string to string.
type EnvVars = std::collections::HashMap<String, String>;
/// WasccProvider provides a Kubelet runtime implementation that executes WASM binaries.
///
/// Currently, this runtime uses WASCC as a host, loading the primary container as an actor.
/// TODO: In the future, we will look at loading capabilities using the "sidecar" metaphor
/// from Kubernetes.
#[derive(Clone)]
// Unit struct: the provider holds no state of its own; loaded actors and
// capabilities appear to live in process-global state inside the waSCC
// `host` module (its free functions take no handle) — TODO confirm.
pub struct WasccProvider {}
#[async_trait::async_trait]
impl Provider for WasccProvider {
    /// Loads the native HTTP server capability library from disk and
    /// registers it with the waSCC host so actors can use `wascc:http_server`.
    async fn init(&self) -> Result<(), failure::Error> {
        let data = NativeCapability::from_file(HTTP_LIB)
            .map_err(|e| format_err!("Failed to read HTTP capability {}: {}", HTTP_LIB, e))?;
        host::add_native_capability(data)
            .map_err(|e| format_err!("Failed to load HTTP capability: {}", e))
    }
fn arch(&self) -> String {
TARGET_WASM32_WASCC.to_string()
}
    fn can_schedule(&self, pod: &Pod) -> bool {
        // If there is a node selector and it has arch set to wasm32-wascc, we can
        // schedule it.
        // A missing spec, selector, or arch key means this provider must not
        // claim the pod.
        pod.spec
            .as_ref()
            .and_then(|s| s.node_selector.as_ref())
            .and_then(|i| {
                i.get("beta.kubernetes.io/arch")
                    .map(|v| v.eq(&TARGET_WASM32_WASCC))
            })
            .unwrap_or(false)
    }
async fn add(&self, pod: Pod, client: APIClient) -> Result<(), failure::Error> {
// To run an Add event, we load the WASM, update the pod status to Running,
// and then execute the WASM, passing in the relevant data.
// When the pod finishes, we update the status to Succeeded unless it
// produces an error, in which case we mark it Failed.
debug!(
"Pod added {:?}",
pod.metadata.as_ref().and_then(|m| m.name.as_ref())
);
let namespace = pod
.metadata
.as_ref()
.and_then(|m| m.namespace.as_deref())
.unwrap_or_else(|| "default");
// TODO: Replace with actual image store lookup when it is merged
let data = std::fs::read("./testdata/echo.wasm")?;
// TODO: Implement this for real.
// Okay, so here is where things are REALLY unfinished. Right now, we are
// only running the first container in a pod. And we are not using the
// init containers at all. And they are not executed on their own threads.
// So this is basically a toy.
//
// What it should do:
// - for each volume
// - set up the volume map
// - for each init container:
// - set up the runtime
// - mount any volumes (popen)
// - run it to completion
// - bail with an error if it fails
// - for each container and ephemeral_container
// - set up the runtime
// - mount any volumes (popen)
// - run it to completion
// - bail if it errors
let first_container = pod.spec.as_ref().map(|s| s.containers[0].clone()).unwrap();
// This would lock us into one wascc actor per pod. I don't know if
// that is a good thing. Other containers would then be limited
// to acting as components... which largely follows the sidecar
// pattern.
//
// Another possibility is to embed the key in the image reference
// (image/foo.wasm@ed25519:PUBKEY). That might work best, but it is
// not terribly useable.
//
// A really icky one would be to just require the pubkey in the env
// vars and suck it out of there. But that violates the intention
// of env vars, which is to communicate _into_ the runtime, not to
// configure the runtime.
let pubkey = pod
.metadata
.as_ref()
.and_then(|s| s.annotations.as_ref())
.unwrap()
.get(ACTOR_PUBLIC_KEY)
.map(|a| a.to_string())
.unwrap_or_default();
debug!("{:?}", pubkey);
// TODO: Launch this in a thread. (not necessary with waSCC)
let env = self.env_vars(client.clone(), &first_container, &pod).await;
//let args = first_container.args.unwrap_or_else(|| vec![]);
match wascc_run_http(data, env, pubkey.as_str()) {
Ok(_) => {
info!("Pod is executing on a thread");
pod_status(client, &pod, "Running", namespace).await;
Ok(())
}
Err(e) => {
pod_status(client, &pod, "Failed", namespace).await;
Err(failure::format_err!("Failed to run pod: {}", e))
}
}
}
async fn modify(&self, pod: Pod, _client: APIClient) -> Result<(), failure::Error> {
// Modify will be tricky. Not only do we need to handle legitimate modifications, but we
// need to sift out modifications that simply alter the status. For the time being, we
// just ignore them, which is the wrong thing to do... except that it demos better than
// other wrong things.
info!("Pod modified");
info!(
"Modified pod spec: {}",
serde_json::to_string_pretty(&pod.status.unwrap()).unwrap()
);
Ok(())
}
async fn delete(&self, pod: Pod, _client: APIClient) -> Result<(), failure::Error> {
let pubkey = pod
.metadata
.unwrap_or_default()
.annotations
.unwrap_or_default()
.get(ACTOR_PUBLIC_KEY)
.map(|a| a.to_string())
.unwrap_or_else(|| "".into());
wascc_stop(&pubkey).map_err(|e| format_err!("Failed to stop wascc actor: {}", e))
}
async fn status(&self, pod: Pod, _client: APIClient) -> Result<Status, failure::Error> {
match pod
.metadata
.unwrap_or_default()
.annotations
.unwrap_or_default()
.get(ACTOR_PUBLIC_KEY)
{
None => Ok(Status {
phase: Phase::Unknown,
message: None,
}),
Some(pk) => {
match host::actor_claims(pk) {
None => {
// FIXME: I don't know how to tell if an actor failed.
Ok(Status {
phase: Phase::Succeeded,
message: None,
})
}
Some(_) => Ok(Status {
phase: Phase::Running,
message: None,
}),
}
}
}
}
}
/// Run a WasCC module inside of the host, configuring it to handle HTTP requests.
///
/// This bootstraps an HTTP host, using the value of the env's `PORT` key to expose a port.
fn wascc_run_http(data: Vec<u8>, env: EnvVars, key: &str) -> Result<(), failure::Error> {
    // NOTE(review): a previous revision built a separate `httpenv` map here that
    // defaulted "PORT" to "80", but that map was never passed anywhere -- the
    // capability has always been configured with the pod-supplied `env` as-is.
    // The dead code is removed; if the port default was meant to reach the HTTP
    // capability, that still needs to be wired up. TODO confirm intent.
    wascc_run(
        data,
        key,
        vec![Capability {
            name: HTTP_CAPABILITY,
            env,
        }],
    )
}
/// Stop a running waSCC actor.
///
/// Thin wrapper over `host::remove_actor`; `key` is the actor's public key.
fn wascc_stop(key: &str) -> Result<(), wascc_host::errors::Error> {
    host::remove_actor(key)
}
/// Capability describes a waSCC capability.
///
/// Capabilities are made available to actors through a two-part process:
/// - They must be registered
/// - For each actor, the capability must be configured
struct Capability {
    // Capability provider name, e.g. "wascc:http_server" (HTTP_CAPABILITY).
    name: &'static str,
    // Per-actor configuration values handed to `host::configure`.
    env: EnvVars,
}
/// Run the given WASM data as a waSCC actor with the given public key.
///
/// The provided capabilities will be configured for this actor, but the capabilities
/// must first be loaded into the host by some other process, such as register_native_capabilities().
fn wascc_run(
data: Vec<u8>,
key: &str,
capabilities: Vec<Capability>,
) -> Result<(), failure::Error> {
info!("wascc run");
let load = Actor::from_bytes(data).map_err(|e| format_err!("Error loading WASM: {}", e))?;
host::add_actor(load).map_err(|e| format_err!("Error adding actor: {}", e))?;
capabilities.iter().try_for_each(|cap| {
info!("configuring capability {}", cap.name);
host::configure(key, cap.name, cap.env.clone())
.map_err(|e| format_err!("Error configuring capabilities for module: {}", e))
})?;
info!("Instance executing");
Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use k8s_openapi::api::core::v1::PodSpec;
    // Integration-style test: requires the platform HTTP capability library to
    // exist at ./lib (see HTTP_LIB).
    #[tokio::test]
    async fn test_init() {
        let provider = WasccProvider {};
        provider
            .init()
            .await
            .expect("HTTP capability is registered");
    }
    // Integration-style test: requires ./testdata/echo.wasm and an initialized
    // waSCC host; starts the actor, waits for its webserver, then removes it.
    #[test]
    fn test_wascc_run() {
        // Open file
        let data = std::fs::read("./testdata/echo.wasm").expect("read the wasm file");
        // Send into wascc_run
        wascc_run_http(
            data,
            EnvVars::new(),
            "MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2",
        )
        .expect("successfully executed a WASM");
        // Give the webserver a chance to start up.
        std::thread::sleep(std::time::Duration::from_secs(3));
        wascc_stop("MB4OLDIC3TCZ4Q4TGGOVAZC43VXFE2JQVRAXQMQFXUCREOOFEKOKZTY2")
            .expect("Removed the actor");
    }
    // Pure unit test: scheduling decision flips on the value of the
    // beta.kubernetes.io/arch node selector.
    #[test]
    fn test_can_schedule() {
        let wr = WasccProvider {};
        let mut mock = Default::default();
        assert!(!wr.can_schedule(&mock));
        let mut selector = std::collections::BTreeMap::new();
        selector.insert(
            "beta.kubernetes.io/arch".to_string(),
            "wasm32-wascc".to_string(),
        );
        mock.spec = Some(PodSpec {
            node_selector: Some(selector.clone()),
            ..Default::default()
        });
        assert!(wr.can_schedule(&mock));
        selector.insert("beta.kubernetes.io/arch".to_string(), "amd64".to_string());
        mock.spec = Some(PodSpec {
            node_selector: Some(selector),
            ..Default::default()
        });
        assert!(!wr.can_schedule(&mock));
    }
}
| 35.771987 | 101 | 0.568749 |
62ef40818284004939f97b1d06791aa17395e172 | 33,283 | //! See `CompletionContext` structure.
use hir::{Local, ScopeDef, Semantics, SemanticsScope, Type};
use ide_db::base_db::{FilePosition, SourceDatabase};
use ide_db::{call_info::ActiveParameter, RootDatabase};
use syntax::{
algo::find_node_at_offset,
ast::{self, NameOrNameRef, NameOwner},
match_ast, AstNode, NodeOrToken,
SyntaxKind::*,
SyntaxNode, SyntaxToken, TextRange, TextSize,
};
use text_edit::Indel;
use crate::{
patterns::{
fn_is_prev, for_is_prev2, has_bind_pat_parent, has_block_expr_parent,
has_field_list_parent, has_impl_as_prev_sibling, has_impl_parent,
has_item_list_or_source_file_parent, has_ref_parent, has_trait_as_prev_sibling,
has_trait_parent, if_is_prev, inside_impl_trait_block, is_in_loop_body, is_match_arm,
unsafe_is_prev,
},
CompletionConfig,
};
/// `CompletionContext` is created early during completion to figure out, where
/// exactly is the cursor, syntax-wise.
#[derive(Debug)]
pub(crate) struct CompletionContext<'a> {
    pub(super) sema: Semantics<'a, RootDatabase>,
    pub(super) scope: SemanticsScope<'a>,
    pub(super) db: &'a RootDatabase,
    pub(super) config: &'a CompletionConfig,
    pub(super) position: FilePosition,
    /// The token before the cursor, in the original file.
    pub(super) original_token: SyntaxToken,
    /// The token before the cursor, in the macro-expanded file.
    pub(super) token: SyntaxToken,
    pub(super) krate: Option<hir::Crate>,
    // Expected name/type at the cursor, derived by `expected_type_and_name`.
    pub(super) expected_name: Option<NameOrNameRef>,
    pub(super) expected_type: Option<Type>,
    // Syntax nodes (from the *original* file) enclosing the completion point,
    // populated by `fill` / `classify_*`; `None` when not applicable.
    pub(super) name_ref_syntax: Option<ast::NameRef>,
    pub(super) lifetime_syntax: Option<ast::Lifetime>,
    pub(super) lifetime_param_syntax: Option<ast::LifetimeParam>,
    pub(super) function_syntax: Option<ast::Fn>,
    pub(super) use_item_syntax: Option<ast::Use>,
    pub(super) record_lit_syntax: Option<ast::RecordExpr>,
    pub(super) record_pat_syntax: Option<ast::RecordPat>,
    pub(super) record_field_syntax: Option<ast::RecordExprField>,
    pub(super) impl_def: Option<ast::Impl>,
    pub(super) lifetime_allowed: bool,
    /// FIXME: `ActiveParameter` is string-based, which is very very wrong
    pub(super) active_parameter: Option<ActiveParameter>,
    pub(super) is_param: bool,
    pub(super) is_label_ref: bool,
    /// If a name-binding or reference to a const in a pattern.
    /// Irrefutable patterns (like let) are excluded.
    pub(super) is_pat_binding_or_const: bool,
    pub(super) is_irrefutable_pat_binding: bool,
    /// A single-indent path, like `foo`. `::foo` should not be considered a trivial path.
    pub(super) is_trivial_path: bool,
    /// If not a trivial path, the prefix (qualifier).
    pub(super) path_qual: Option<ast::Path>,
    pub(super) after_if: bool,
    /// `true` if we are a statement or a last expr in the block.
    pub(super) can_be_stmt: bool,
    /// `true` if we expect an expression at the cursor position.
    pub(super) is_expr: bool,
    /// Something is typed at the "top" level, in module or impl/trait.
    pub(super) is_new_item: bool,
    /// The receiver if this is a field or method access, i.e. writing something.$0
    pub(super) dot_receiver: Option<ast::Expr>,
    pub(super) dot_receiver_is_ambiguous_float_literal: bool,
    /// If this is a call (method or function) in particular, i.e. the () are already there.
    pub(super) is_call: bool,
    /// Like `is_call`, but for tuple patterns.
    pub(super) is_pattern_call: bool,
    /// If this is a macro call, i.e. the () are already there.
    pub(super) is_macro_call: bool,
    pub(super) is_path_type: bool,
    pub(super) has_type_args: bool,
    pub(super) attribute_under_caret: Option<ast::Attr>,
    pub(super) mod_declaration_under_caret: Option<ast::Module>,
    // Keyword-pattern flags, populated by `fill_keyword_patterns` from the
    // crate::patterns helpers of the same names.
    pub(super) unsafe_is_prev: bool,
    pub(super) if_is_prev: bool,
    pub(super) block_expr_parent: bool,
    pub(super) bind_pat_parent: bool,
    pub(super) ref_pat_parent: bool,
    pub(super) in_loop_body: bool,
    pub(super) has_trait_parent: bool,
    pub(super) has_impl_parent: bool,
    pub(super) inside_impl_trait_block: bool,
    pub(super) has_field_list_parent: bool,
    pub(super) trait_as_prev_sibling: bool,
    pub(super) impl_as_prev_sibling: bool,
    pub(super) is_match_arm: bool,
    pub(super) has_item_list_or_source_file_parent: bool,
    pub(super) for_is_prev2: bool,
    pub(super) fn_is_prev: bool,
    pub(super) incomplete_let: bool,
    // Locals visible at the completion position, as (name, Local) pairs.
    pub(super) locals: Vec<(String, Local)>,
}
impl<'a> CompletionContext<'a> {
    /// Builds a `CompletionContext` for `position`, or `None` if there is no
    /// token to the left of the cursor. Parses the file twice (once as-is, once
    /// with a fake ident inserted at the cursor) and descends through macro
    /// calls before classifying the surroundings.
    pub(super) fn new(
        db: &'a RootDatabase,
        position: FilePosition,
        config: &'a CompletionConfig,
    ) -> Option<CompletionContext<'a>> {
        let sema = Semantics::new(db);
        let original_file = sema.parse(position.file_id);
        // Insert a fake ident to get a valid parse tree. We will use this file
        // to determine context, though the original_file will be used for
        // actual completion.
        let file_with_fake_ident = {
            let parse = db.parse(position.file_id);
            let edit = Indel::insert(position.offset, "intellijRulezz".to_string());
            parse.reparse(&edit).tree()
        };
        let fake_ident_token =
            file_with_fake_ident.syntax().token_at_offset(position.offset).right_biased().unwrap();
        let krate = sema.to_module_def(position.file_id).map(|m| m.krate());
        // Bail out (`?`) when the cursor sits before the first token.
        let original_token =
            original_file.syntax().token_at_offset(position.offset).left_biased()?;
        let token = sema.descend_into_macros(original_token.clone());
        let scope = sema.scope_at_offset(&token, position.offset);
        // Snapshot all locals visible at the cursor for later ranking/lookup.
        let mut locals = vec![];
        scope.process_all_names(&mut |name, scope| {
            if let ScopeDef::Local(local) = scope {
                locals.push((name.to_string(), local));
            }
        });
        // Start with every flag false/None; `fill*` below populates them.
        let mut ctx = CompletionContext {
            sema,
            scope,
            db,
            config,
            position,
            original_token,
            token,
            krate,
            lifetime_allowed: false,
            expected_name: None,
            expected_type: None,
            name_ref_syntax: None,
            lifetime_syntax: None,
            lifetime_param_syntax: None,
            function_syntax: None,
            use_item_syntax: None,
            record_lit_syntax: None,
            record_pat_syntax: None,
            record_field_syntax: None,
            impl_def: None,
            active_parameter: ActiveParameter::at(db, position),
            is_label_ref: false,
            is_param: false,
            is_pat_binding_or_const: false,
            is_irrefutable_pat_binding: false,
            is_trivial_path: false,
            path_qual: None,
            after_if: false,
            can_be_stmt: false,
            is_expr: false,
            is_new_item: false,
            dot_receiver: None,
            dot_receiver_is_ambiguous_float_literal: false,
            is_call: false,
            is_pattern_call: false,
            is_macro_call: false,
            is_path_type: false,
            has_type_args: false,
            attribute_under_caret: None,
            mod_declaration_under_caret: None,
            unsafe_is_prev: false,
            if_is_prev: false,
            block_expr_parent: false,
            bind_pat_parent: false,
            ref_pat_parent: false,
            in_loop_body: false,
            has_trait_parent: false,
            has_impl_parent: false,
            inside_impl_trait_block: false,
            has_field_list_parent: false,
            trait_as_prev_sibling: false,
            impl_as_prev_sibling: false,
            is_match_arm: false,
            has_item_list_or_source_file_parent: false,
            for_is_prev2: false,
            fn_is_prev: false,
            incomplete_let: false,
            locals,
        };
        let mut original_file = original_file.syntax().clone();
        let mut hypothetical_file = file_with_fake_ident.syntax().clone();
        let mut offset = position.offset;
        let mut fake_ident_token = fake_ident_token;
        // Are we inside a macro call?
        // Repeatedly expand the innermost macro call at the cursor (both for the
        // real file and the fake-ident file), re-pointing `offset` into the
        // expansion, until the two expansions diverge or expansion fails.
        while let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = (
            find_node_at_offset::<ast::MacroCall>(&original_file, offset),
            find_node_at_offset::<ast::MacroCall>(&hypothetical_file, offset),
        ) {
            if actual_macro_call.path().as_ref().map(|s| s.syntax().text())
                != macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text())
            {
                break;
            }
            let hypothetical_args = match macro_call_with_fake_ident.token_tree() {
                Some(tt) => tt,
                None => break,
            };
            if let (Some(actual_expansion), Some(hypothetical_expansion)) = (
                ctx.sema.expand(&actual_macro_call),
                ctx.sema.speculative_expand(
                    &actual_macro_call,
                    &hypothetical_args,
                    fake_ident_token,
                ),
            ) {
                let new_offset = hypothetical_expansion.1.text_range().start();
                // Guard against the fake ident landing outside the real
                // expansion; otherwise the offsets would no longer correspond.
                if new_offset > actual_expansion.text_range().end() {
                    break;
                }
                original_file = actual_expansion;
                hypothetical_file = hypothetical_expansion.0;
                fake_ident_token = hypothetical_expansion.1;
                offset = new_offset;
            } else {
                break;
            }
        }
        ctx.fill_keyword_patterns(&hypothetical_file, offset);
        ctx.fill(&original_file, hypothetical_file, offset);
        Some(ctx)
    }
    /// Checks whether completions in that particular case don't make much sense.
    /// Examples:
    /// - `fn $0` -- we expect function name, it's unlikely that "hint" will be helpful.
    ///   Exception for this case is `impl Trait for Foo`, where we would like to hint trait method names.
    /// - `for _ i$0` -- obviously, it'll be "in" keyword.
    pub(crate) fn no_completion_required(&self) -> bool {
        (self.fn_is_prev && !self.inside_impl_trait_block) || self.for_is_prev2
    }
    /// The range of the identifier that is being completed.
    pub(crate) fn source_range(&self) -> TextRange {
        // check kind of macro-expanded token, but use range of original token
        let kind = self.token.kind();
        if kind == IDENT || kind == LIFETIME_IDENT || kind == UNDERSCORE || kind.is_keyword() {
            cov_mark::hit!(completes_if_prefix_is_keyword);
            self.original_token.text_range()
        } else if kind == CHAR {
            // assume we are completing a lifetime but the user has only typed the '
            cov_mark::hit!(completes_if_lifetime_without_idents);
            TextRange::at(self.original_token.text_range().start(), TextSize::from(1))
        } else {
            // No identifier under the cursor: complete into an empty range.
            TextRange::empty(self.position.offset)
        }
    }
    /// Populates the boolean "keyword pattern" flags by probing the fake-ident
    /// file with the crate::patterns helpers of the same names.
    fn fill_keyword_patterns(&mut self, file_with_fake_ident: &SyntaxNode, offset: TextSize) {
        let fake_ident_token = file_with_fake_ident.token_at_offset(offset).right_biased().unwrap();
        let syntax_element = NodeOrToken::Token(fake_ident_token);
        self.block_expr_parent = has_block_expr_parent(syntax_element.clone());
        self.unsafe_is_prev = unsafe_is_prev(syntax_element.clone());
        self.if_is_prev = if_is_prev(syntax_element.clone());
        self.bind_pat_parent = has_bind_pat_parent(syntax_element.clone());
        self.ref_pat_parent = has_ref_parent(syntax_element.clone());
        self.in_loop_body = is_in_loop_body(syntax_element.clone());
        self.has_trait_parent = has_trait_parent(syntax_element.clone());
        self.has_impl_parent = has_impl_parent(syntax_element.clone());
        self.inside_impl_trait_block = inside_impl_trait_block(syntax_element.clone());
        self.has_field_list_parent = has_field_list_parent(syntax_element.clone());
        self.impl_as_prev_sibling = has_impl_as_prev_sibling(syntax_element.clone());
        self.trait_as_prev_sibling = has_trait_as_prev_sibling(syntax_element.clone());
        self.is_match_arm = is_match_arm(syntax_element.clone());
        self.has_item_list_or_source_file_parent =
            has_item_list_or_source_file_parent(syntax_element.clone());
        // A `mod foo$0;`-style declaration (no item list) under the caret.
        self.mod_declaration_under_caret =
            find_node_at_offset::<ast::Module>(&file_with_fake_ident, offset)
                .filter(|module| module.item_list().is_none());
        self.for_is_prev2 = for_is_prev2(syntax_element.clone());
        self.fn_is_prev = fn_is_prev(syntax_element.clone());
        // `let x = $0` with nothing after: the let statement ends exactly at the
        // fake ident (only look a few ancestors up for the LetStmt).
        self.incomplete_let =
            syntax_element.ancestors().take(6).find_map(ast::LetStmt::cast).map_or(false, |it| {
                it.syntax().text_range().end() == syntax_element.text_range().end()
            });
    }
    /// Records the innermost enclosing `impl` block, stopping the ancestor walk
    /// at module/file boundaries so an impl in an outer scope is not picked up.
    fn fill_impl_def(&mut self) {
        self.impl_def = self
            .sema
            .token_ancestors_with_macros(self.token.clone())
            .take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
            .find_map(ast::Impl::cast);
    }
    /// Walks up from the cursor token to infer the type (and, where available,
    /// the name) expected at the completion position: let-bindings, call
    /// arguments, record fields, match arms, if-let conditions, and function
    /// return positions are recognized. Returns `(None, None)` when nothing
    /// can be inferred.
    fn expected_type_and_name(&self) -> (Option<Type>, Option<NameOrNameRef>) {
        let mut node = match self.token.parent() {
            Some(it) => it,
            None => return (None, None),
        };
        loop {
            break match_ast! {
                match node {
                    ast::LetStmt(it) => {
                        cov_mark::hit!(expected_type_let_with_leading_char);
                        cov_mark::hit!(expected_type_let_without_leading_char);
                        let ty = it.pat()
                            .and_then(|pat| self.sema.type_of_pat(&pat));
                        let name = if let Some(ast::Pat::IdentPat(ident)) = it.pat() {
                            ident.name().map(NameOrNameRef::Name)
                        } else {
                            None
                        };
                        (ty, name)
                    },
                    ast::ArgList(_it) => {
                        cov_mark::hit!(expected_type_fn_param_with_leading_char);
                        cov_mark::hit!(expected_type_fn_param_without_leading_char);
                        // Use the active parameter of the enclosing call for
                        // both the type and the parameter name.
                        ActiveParameter::at_token(
                            &self.sema,
                            self.token.clone(),
                        ).map(|ap| {
                            let name = ap.ident().map(NameOrNameRef::Name);
                            (Some(ap.ty), name)
                        })
                        .unwrap_or((None, None))
                    },
                    ast::RecordExprFieldList(_it) => {
                        cov_mark::hit!(expected_type_struct_field_without_leading_char);
                        // Cursor sits after `field:` -- resolve the preceding
                        // record field to get its declared type.
                        self.token.prev_sibling_or_token()
                            .and_then(|se| se.into_node())
                            .and_then(|node| ast::RecordExprField::cast(node))
                            .and_then(|rf| self.sema.resolve_record_field(&rf).zip(Some(rf)))
                            .map(|(f, rf)|(
                                Some(f.0.ty(self.db)),
                                rf.field_name().map(NameOrNameRef::NameRef),
                            ))
                            .unwrap_or((None, None))
                    },
                    ast::RecordExprField(it) => {
                        cov_mark::hit!(expected_type_struct_field_with_leading_char);
                        self.sema
                            .resolve_record_field(&it)
                            .map(|f|(
                                Some(f.0.ty(self.db)),
                                it.field_name().map(NameOrNameRef::NameRef),
                            ))
                            .unwrap_or((None, None))
                    },
                    ast::MatchExpr(it) => {
                        cov_mark::hit!(expected_type_match_arm_without_leading_char);
                        // Arms are expected to have the scrutinee's type.
                        let ty = it.expr()
                            .and_then(|e| self.sema.type_of_expr(&e));
                        (ty, None)
                    },
                    ast::IfExpr(it) => {
                        cov_mark::hit!(expected_type_if_let_without_leading_char);
                        let ty = it.condition()
                            .and_then(|cond| cond.expr())
                            .and_then(|e| self.sema.type_of_expr(&e));
                        (ty, None)
                    },
                    ast::IdentPat(it) => {
                        cov_mark::hit!(expected_type_if_let_with_leading_char);
                        cov_mark::hit!(expected_type_match_arm_with_leading_char);
                        let ty = self.sema.type_of_pat(&ast::Pat::from(it));
                        (ty, None)
                    },
                    ast::Fn(it) => {
                        cov_mark::hit!(expected_type_fn_ret_with_leading_char);
                        cov_mark::hit!(expected_type_fn_ret_without_leading_char);
                        // Tail-expression position: expect the return type.
                        let def = self.sema.to_def(&it);
                        (def.map(|def| def.ret_type(self.db)), None)
                    },
                    ast::Stmt(_it) => (None, None),
                    _ => {
                        // Not a recognized context yet: climb one level and retry.
                        match node.parent() {
                            Some(n) => {
                                node = n;
                                continue;
                            },
                            None => (None, None),
                        }
                    },
                }
            };
        }
    }
    /// Main classification entry point: determines expected type/name, then
    /// dispatches on what sits under the caret in the fake-ident file -- a
    /// lifetime, a name reference (something being *used*), or a name
    /// (something being *declared*) -- and sets the corresponding flags.
    fn fill(
        &mut self,
        original_file: &SyntaxNode,
        file_with_fake_ident: SyntaxNode,
        offset: TextSize,
    ) {
        let (expected_type, expected_name) = self.expected_type_and_name();
        self.expected_type = expected_type;
        self.expected_name = expected_name;
        self.attribute_under_caret = find_node_at_offset(&file_with_fake_ident, offset);
        if let Some(lifetime) = find_node_at_offset::<ast::Lifetime>(&file_with_fake_ident, offset)
        {
            self.classify_lifetime(original_file, lifetime, offset);
        }
        // First, let's try to complete a reference to some declaration.
        if let Some(name_ref) = find_node_at_offset::<ast::NameRef>(&file_with_fake_ident, offset) {
            // Special case, `trait T { fn foo(i_am_a_name_ref) {} }`.
            // See RFC#1685.
            if is_node::<ast::Param>(name_ref.syntax()) {
                self.is_param = true;
                return;
            }
            // FIXME: remove this (V) duplication and make the check more precise
            if name_ref.syntax().ancestors().find_map(ast::RecordPatFieldList::cast).is_some() {
                self.record_pat_syntax =
                    self.sema.find_node_at_offset_with_macros(&original_file, offset);
            }
            self.classify_name_ref(original_file, name_ref, offset);
        }
        // Otherwise, see if this is a declaration. We can use heuristics to
        // suggest declaration names, see `CompletionKind::Magic`.
        if let Some(name) = find_node_at_offset::<ast::Name>(&file_with_fake_ident, offset) {
            if let Some(bind_pat) = name.syntax().ancestors().find_map(ast::IdentPat::cast) {
                self.is_pat_binding_or_const = true;
                // `@`, `ref`, or `mut` make this a plain binding, not a
                // potential const/variant reference.
                if bind_pat.at_token().is_some()
                    || bind_pat.ref_token().is_some()
                    || bind_pat.mut_token().is_some()
                {
                    self.is_pat_binding_or_const = false;
                }
                if bind_pat.syntax().parent().and_then(ast::RecordPatFieldList::cast).is_some() {
                    self.is_pat_binding_or_const = false;
                }
                // A binding whose whole enclosing pattern is a let/param pattern
                // is irrefutable (e.g. `let x = ...`, `fn f(x: T)`).
                if let Some(Some(pat)) = bind_pat.syntax().ancestors().find_map(|node| {
                    match_ast! {
                        match node {
                            ast::LetStmt(it) => Some(it.pat()),
                            ast::Param(it) => Some(it.pat()),
                            _ => None,
                        }
                    }
                }) {
                    if pat.syntax().text_range().contains_range(bind_pat.syntax().text_range()) {
                        self.is_pat_binding_or_const = false;
                        self.is_irrefutable_pat_binding = true;
                    }
                }
                self.fill_impl_def();
            }
            if is_node::<ast::Param>(name.syntax()) {
                self.is_param = true;
                return;
            }
            // FIXME: remove this (^) duplication and make the check more precise
            if name.syntax().ancestors().find_map(ast::RecordPatFieldList::cast).is_some() {
                self.record_pat_syntax =
                    self.sema.find_node_at_offset_with_macros(&original_file, offset);
            }
        }
    }
    /// Classifies a lifetime token under the caret: lifetime parameter
    /// declaration, label reference (`break`/`continue`), label definition,
    /// or a position where a lifetime is generally allowed.
    fn classify_lifetime(
        &mut self,
        original_file: &SyntaxNode,
        lifetime: ast::Lifetime,
        offset: TextSize,
    ) {
        self.lifetime_syntax =
            find_node_at_offset(original_file, lifetime.syntax().text_range().start());
        if let Some(parent) = lifetime.syntax().parent() {
            // Inside an ERROR node the classification would be meaningless.
            if parent.kind() == ERROR {
                return;
            }
            match_ast! {
                match parent {
                    ast::LifetimeParam(_it) => {
                        self.lifetime_allowed = true;
                        self.lifetime_param_syntax =
                            self.sema.find_node_at_offset_with_macros(original_file, offset);
                    },
                    ast::BreakExpr(_it) => self.is_label_ref = true,
                    ast::ContinueExpr(_it) => self.is_label_ref = true,
                    // A label *definition* gets no flags set.
                    ast::Label(_it) => (),
                    _ => self.lifetime_allowed = true,
                }
            }
        }
    }
    /// Classifies a name reference under the caret: path segments (calls,
    /// macros, patterns, types, qualified vs. trivial paths) and dot accesses
    /// (field access, method call), setting the corresponding context flags.
    fn classify_name_ref(
        &mut self,
        original_file: &SyntaxNode,
        name_ref: ast::NameRef,
        offset: TextSize,
    ) {
        self.name_ref_syntax =
            find_node_at_offset(original_file, name_ref.syntax().text_range().start());
        let name_range = name_ref.syntax().text_range();
        if ast::RecordExprField::for_field_name(&name_ref).is_some() {
            self.record_lit_syntax =
                self.sema.find_node_at_offset_with_macros(original_file, offset);
        }
        self.fill_impl_def();
        // Highest ancestor that covers exactly the name_ref's range; its parent
        // tells us whether we're at item (top) level.
        let top_node = name_ref
            .syntax()
            .ancestors()
            .take_while(|it| it.text_range() == name_range)
            .last()
            .unwrap();
        match top_node.parent().map(|it| it.kind()) {
            Some(SOURCE_FILE) | Some(ITEM_LIST) => {
                self.is_new_item = true;
                return;
            }
            _ => (),
        }
        self.use_item_syntax =
            self.sema.token_ancestors_with_macros(self.token.clone()).find_map(ast::Use::cast);
        self.function_syntax = self
            .sema
            .token_ancestors_with_macros(self.token.clone())
            .take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
            .find_map(ast::Fn::cast);
        self.record_field_syntax = self
            .sema
            .token_ancestors_with_macros(self.token.clone())
            .take_while(|it| {
                it.kind() != SOURCE_FILE && it.kind() != MODULE && it.kind() != CALL_EXPR
            })
            .find_map(ast::RecordExprField::cast);
        let parent = match name_ref.syntax().parent() {
            Some(it) => it,
            None => return,
        };
        if let Some(segment) = ast::PathSegment::cast(parent.clone()) {
            let path = segment.parent_path();
            self.is_call = path
                .syntax()
                .parent()
                .and_then(ast::PathExpr::cast)
                .and_then(|it| it.syntax().parent().and_then(ast::CallExpr::cast))
                .is_some();
            self.is_macro_call = path.syntax().parent().and_then(ast::MacroCall::cast).is_some();
            self.is_pattern_call =
                path.syntax().parent().and_then(ast::TupleStructPat::cast).is_some();
            self.is_path_type = path.syntax().parent().and_then(ast::PathType::cast).is_some();
            self.has_type_args = segment.generic_arg_list().is_some();
            // Qualified path (or use-tree prefix): record the qualifier, mapped
            // back into the original file, and stop -- it is not a trivial path.
            if let Some(path) = path_or_use_tree_qualifier(&path) {
                self.path_qual = path
                    .segment()
                    .and_then(|it| {
                        find_node_with_range::<ast::PathSegment>(
                            original_file,
                            it.syntax().text_range(),
                        )
                    })
                    .map(|it| it.parent_path());
                return;
            }
            // Leading `::` also disqualifies the path from being "trivial".
            if let Some(segment) = path.segment() {
                if segment.coloncolon_token().is_some() {
                    return;
                }
            }
            self.is_trivial_path = true;
            // Find either enclosing expr statement (thing with `;`) or a
            // block. If block, check that we are the last expr.
            self.can_be_stmt = name_ref
                .syntax()
                .ancestors()
                .find_map(|node| {
                    if let Some(stmt) = ast::ExprStmt::cast(node.clone()) {
                        return Some(stmt.syntax().text_range() == name_ref.syntax().text_range());
                    }
                    if let Some(block) = ast::BlockExpr::cast(node) {
                        return Some(
                            block.tail_expr().map(|e| e.syntax().text_range())
                                == Some(name_ref.syntax().text_range()),
                        );
                    }
                    None
                })
                .unwrap_or(false);
            self.is_expr = path.syntax().parent().and_then(ast::PathExpr::cast).is_some();
            // Look two chars back for a just-closed `if` expression, to offer
            // `else`-style completions after it.
            if let Some(off) = name_ref.syntax().text_range().start().checked_sub(2.into()) {
                if let Some(if_expr) =
                    self.sema.find_node_at_offset_with_macros::<ast::IfExpr>(original_file, off)
                {
                    if if_expr.syntax().text_range().end() < name_ref.syntax().text_range().start()
                    {
                        self.after_if = true;
                    }
                }
            }
        }
        if let Some(field_expr) = ast::FieldExpr::cast(parent.clone()) {
            // The receiver comes before the point of insertion of the fake
            // ident, so it should have the same range in the non-modified file
            self.dot_receiver = field_expr
                .expr()
                .map(|e| e.syntax().text_range())
                .and_then(|r| find_node_with_range(original_file, r));
            // `1.$0` parses as a float literal ending in `.` -- remember that so
            // completion can still offer methods on the integer part.
            self.dot_receiver_is_ambiguous_float_literal =
                if let Some(ast::Expr::Literal(l)) = &self.dot_receiver {
                    match l.kind() {
                        ast::LiteralKind::FloatNumber { .. } => l.token().text().ends_with('.'),
                        _ => false,
                    }
                } else {
                    false
                };
        }
        if let Some(method_call_expr) = ast::MethodCallExpr::cast(parent) {
            // As above
            self.dot_receiver = method_call_expr
                .receiver()
                .map(|e| e.syntax().text_range())
                .and_then(|r| find_node_with_range(original_file, r));
            self.is_call = true;
        }
    }
}
// Finds the smallest ancestor of the covering element castable to `N` -- used
// to map a node range from the fake-ident file back into the original file.
fn find_node_with_range<N: AstNode>(syntax: &SyntaxNode, range: TextRange) -> Option<N> {
    syntax.covering_element(range).ancestors().find_map(N::cast)
}
// True when `node` itself *is* an `N` (its nearest castable ancestor covers
// exactly the same text range as `node`).
fn is_node<N: AstNode>(node: &SyntaxNode) -> bool {
    node.ancestors()
        .find_map(N::cast)
        .map_or(false, |n| n.syntax().text_range() == node.text_range())
}
// Returns the path's qualifier, or -- when the path is inside a use-tree list
// like `use foo::{bar$0}` -- the prefix path of the enclosing use tree.
fn path_or_use_tree_qualifier(path: &ast::Path) -> Option<ast::Path> {
    if let Some(qual) = path.qualifier() {
        return Some(qual);
    }
    let use_tree_list = path.syntax().ancestors().find_map(ast::UseTreeList::cast)?;
    let use_tree = use_tree_list.syntax().parent().and_then(ast::UseTree::cast)?;
    use_tree.path()
}
#[cfg(test)]
mod tests {
    use expect_test::{expect, Expect};
    use hir::HirDisplay;
    use crate::test_utils::{position, TEST_CONFIG};
    use super::CompletionContext;
    // Builds a CompletionContext at the `$0` marker in `ra_fixture` and snapshots
    // the inferred expected type and name ("?" when nothing could be inferred).
    fn check_expected_type_and_name(ra_fixture: &str, expect: Expect) {
        let (db, pos) = position(ra_fixture);
        let completion_context = CompletionContext::new(&db, pos, &TEST_CONFIG).unwrap();
        let ty = completion_context
            .expected_type
            .map(|t| t.display_test(&db).to_string())
            .unwrap_or("?".to_owned());
        let name = completion_context
            .expected_name
            .map_or_else(|| "?".to_owned(), |name| name.to_string());
        expect.assert_eq(&format!("ty: {}, name: {}", ty, name));
    }
    // Each pair of tests below covers the same syntactic context with and
    // without a leading character typed before `$0`, matching the cov_mark
    // hits in `expected_type_and_name`.
    #[test]
    fn expected_type_let_without_leading_char() {
        cov_mark::check!(expected_type_let_without_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() {
    let x: u32 = $0;
}
"#,
            expect![[r#"ty: u32, name: x"#]],
        );
    }
    #[test]
    fn expected_type_let_with_leading_char() {
        cov_mark::check!(expected_type_let_with_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() {
    let x: u32 = c$0;
}
"#,
            expect![[r#"ty: u32, name: x"#]],
        );
    }
    #[test]
    fn expected_type_fn_param_without_leading_char() {
        cov_mark::check!(expected_type_fn_param_without_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() {
    bar($0);
}
fn bar(x: u32) {}
"#,
            expect![[r#"ty: u32, name: x"#]],
        );
    }
    #[test]
    fn expected_type_fn_param_with_leading_char() {
        cov_mark::check!(expected_type_fn_param_with_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() {
    bar(c$0);
}
fn bar(x: u32) {}
"#,
            expect![[r#"ty: u32, name: x"#]],
        );
    }
    #[test]
    fn expected_type_struct_field_without_leading_char() {
        cov_mark::check!(expected_type_struct_field_without_leading_char);
        check_expected_type_and_name(
            r#"
struct Foo { a: u32 }
fn foo() {
    Foo { a: $0 };
}
"#,
            expect![[r#"ty: u32, name: a"#]],
        )
    }
    #[test]
    fn expected_type_struct_field_with_leading_char() {
        cov_mark::check!(expected_type_struct_field_with_leading_char);
        check_expected_type_and_name(
            r#"
struct Foo { a: u32 }
fn foo() {
    Foo { a: c$0 };
}
"#,
            expect![[r#"ty: u32, name: a"#]],
        );
    }
    #[test]
    fn expected_type_match_arm_without_leading_char() {
        cov_mark::check!(expected_type_match_arm_without_leading_char);
        check_expected_type_and_name(
            r#"
enum E { X }
fn foo() {
   match E::X { $0 }
}
"#,
            expect![[r#"ty: E, name: ?"#]],
        );
    }
    #[test]
    fn expected_type_match_arm_with_leading_char() {
        cov_mark::check!(expected_type_match_arm_with_leading_char);
        check_expected_type_and_name(
            r#"
enum E { X }
fn foo() {
   match E::X { c$0 }
}
"#,
            expect![[r#"ty: E, name: ?"#]],
        );
    }
    #[test]
    fn expected_type_if_let_without_leading_char() {
        cov_mark::check!(expected_type_if_let_without_leading_char);
        check_expected_type_and_name(
            r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
    let f = Foo::Quux;
    if let $0 = f { }
}
"#,
            expect![[r#"ty: Foo, name: ?"#]],
        )
    }
    #[test]
    fn expected_type_if_let_with_leading_char() {
        cov_mark::check!(expected_type_if_let_with_leading_char);
        check_expected_type_and_name(
            r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
    let f = Foo::Quux;
    if let c$0 = f { }
}
"#,
            expect![[r#"ty: Foo, name: ?"#]],
        )
    }
    #[test]
    fn expected_type_fn_ret_without_leading_char() {
        cov_mark::check!(expected_type_fn_ret_without_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() -> u32 {
    $0
}
"#,
            expect![[r#"ty: u32, name: ?"#]],
        )
    }
    #[test]
    fn expected_type_fn_ret_with_leading_char() {
        cov_mark::check!(expected_type_fn_ret_with_leading_char);
        check_expected_type_and_name(
            r#"
fn foo() -> u32 {
    c$0
}
"#,
            expect![[r#"ty: u32, name: ?"#]],
        )
    }
    #[test]
    fn expected_type_fn_ret_fn_ref_fully_typed() {
        check_expected_type_and_name(
            r#"
fn foo() -> u32 {
    foo$0
}
"#,
            expect![[r#"ty: u32, name: ?"#]],
        )
    }
}
| 37.907745 | 106 | 0.548538 |
7149d9c7a27f45300ad10ed9406c4013bc3ff586 | 24,522 | // Copyright (c) 2017-present PyO3 Project and Contributors
use crate::method::{FnArg, FnSpec, FnType};
use crate::utils;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::ext::IdentExt;
/// The source a Python property wrapper is generated from: either a struct
/// field (`#[pyo3(get, set)]`-style descriptor) or an explicit getter/setter
/// function.
pub enum PropertyType<'a> {
    Descriptor(&'a syn::Field),
    Function(&'a FnSpec<'a>),
}
/// Generates the `PyMethodDef` token stream for one method of `cls`,
/// dispatching on the parsed method kind (plain, self-receiver, `__new__`,
/// `__call__`, classmethod, staticmethod, getter, setter).
pub fn gen_py_method(
    cls: &syn::Type,
    sig: &mut syn::Signature,
    meth_attrs: &mut Vec<syn::Attribute>,
) -> syn::Result<TokenStream> {
    // Python methods cannot be generic over types or consts.
    check_generic(sig)?;
    let spec = FnSpec::parse(sig, &mut *meth_attrs, true)?;
    Ok(match spec.tp {
        FnType::Fn => impl_py_method_def(&spec, &impl_wrap(cls, &spec, true)),
        FnType::PySelfRef(ref self_ty) => {
            impl_py_method_def(&spec, &impl_wrap_pyslf(cls, &spec, self_ty, true))
        }
        FnType::PySelfPath(ref self_ty) => {
            impl_py_method_def(&spec, &impl_wrap_pyslf(cls, &spec, self_ty, true))
        }
        FnType::FnNew => impl_py_method_def_new(&spec, &impl_wrap_new(cls, &spec)),
        FnType::FnCall => impl_py_method_def_call(&spec, &impl_wrap(cls, &spec, false)),
        FnType::FnClass => impl_py_method_def_class(&spec, &impl_wrap_class(cls, &spec)),
        FnType::FnStatic => impl_py_method_def_static(&spec, &impl_wrap_static(cls, &spec)),
        FnType::Getter => impl_py_getter_def(
            &spec.python_name,
            &spec.doc,
            &impl_wrap_getter(cls, PropertyType::Function(&spec))?,
        ),
        FnType::Setter => impl_py_setter_def(
            &spec.python_name,
            &spec.doc,
            &impl_wrap_setter(cls, PropertyType::Function(&spec))?,
        ),
    })
}
fn check_generic(sig: &syn::Signature) -> syn::Result<()> {
let err_msg = |typ| format!("A Python method can't have a generic {} parameter", typ);
for param in &sig.generics.params {
match param {
syn::GenericParam::Lifetime(_) => {}
syn::GenericParam::Type(_) => {
return Err(syn::Error::new_spanned(param, err_msg("type")));
}
syn::GenericParam::Const(_) => {
return Err(syn::Error::new_spanned(param, err_msg("const")));
}
}
}
Ok(())
}
/// Generate function wrapper (PyCFunction, PyCFunctionWithKeywords)
///
/// Borrows the `PyCell<#cls>` receiver according to the method's self
/// mutability (`spec.borrow_self()`) and delegates shim generation to
/// `impl_wrap_common`.
pub fn impl_wrap(cls: &syn::Type, spec: &FnSpec<'_>, noargs: bool) -> TokenStream {
    let body = impl_call(cls, &spec);
    let borrow_self = spec.borrow_self();
    let slf = quote! {
        let _slf = _py.from_borrowed_ptr::<pyo3::PyCell<#cls>>(_slf);
        #borrow_self
    };
    impl_wrap_common(cls, spec, noargs, slf, body)
}
/// Like `impl_wrap`, but for methods whose receiver is a custom self type:
/// the `PyCell` is converted to `#self_ty` via `TryFrom`, and a conversion
/// failure is raised as a Python exception (`restore_and_null`).
pub fn impl_wrap_pyslf(
    cls: &syn::Type,
    spec: &FnSpec<'_>,
    self_ty: impl quote::ToTokens,
    noargs: bool,
) -> TokenStream {
    let names = get_arg_names(spec);
    let name = &spec.name;
    // The method is invoked as an associated function with the converted self.
    let body = quote! {
        #cls::#name(_slf, #(#names),*)
    };
    let slf = quote! {
        let _cell = _py.from_borrowed_ptr::<pyo3::PyCell<#cls>>(_slf);
        let _slf: #self_ty = match std::convert::TryFrom::try_from(_cell) {
            Ok(_slf) => _slf,
            Err(e) => return pyo3::PyErr::from(e).restore_and_null(_py),
        };
    };
    impl_wrap_common(cls, spec, noargs, slf, body)
}
/// Shared tail of `impl_wrap`/`impl_wrap_pyslf`: wraps `body` in the
/// appropriate `extern "C"` shim named `__wrap`.
///
/// With no declared arguments and `noargs == true`, a `PyCFunction`-shaped
/// shim (suitable for METH_NOARGS) is emitted; otherwise a
/// `PyCFunctionWithKeywords`-shaped shim that parses `_args`/`_kwargs`
/// through `impl_arg_params` before running `body`.
fn impl_wrap_common(
    cls: &syn::Type,
    spec: &FnSpec<'_>,
    noargs: bool,
    slf: TokenStream,
    body: TokenStream,
) -> TokenStream {
    let python_name = &spec.python_name;
    if spec.args.is_empty() && noargs {
        quote! {
            unsafe extern "C" fn __wrap(
                _slf: *mut pyo3::ffi::PyObject,
                _args: *mut pyo3::ffi::PyObject,
            ) -> *mut pyo3::ffi::PyObject
            {
                const _LOCATION: &'static str = concat!(
                    stringify!(#cls), ".", stringify!(#python_name), "()");
                let _py = pyo3::Python::assume_gil_acquired();
                let _pool = pyo3::GILPool::new(_py);
                #slf
                let _result = {
                    pyo3::derive_utils::IntoPyResult::into_py_result(#body)
                };
                pyo3::callback::cb_obj_convert(_py, _result)
            }
        }
    } else {
        let body = impl_arg_params(&spec, body);
        quote! {
            unsafe extern "C" fn __wrap(
                _slf: *mut pyo3::ffi::PyObject,
                _args: *mut pyo3::ffi::PyObject,
                _kwargs: *mut pyo3::ffi::PyObject) -> *mut pyo3::ffi::PyObject
            {
                const _LOCATION: &'static str = concat!(
                    stringify!(#cls), ".", stringify!(#python_name), "()");
                let _py = pyo3::Python::assume_gil_acquired();
                let _pool = pyo3::GILPool::new(_py);
                #slf
                let _args = _py.from_borrowed_ptr::<pyo3::types::PyTuple>(_args);
                let _kwargs: Option<&pyo3::types::PyDict> = _py.from_borrowed_ptr_or_opt(_kwargs);
                #body
                pyo3::callback::cb_obj_convert(_py, _result)
            }
        }
    }
}
/// Generate function wrapper for protocol method (PyCFunction, PyCFunctionWithKeywords)
///
/// Unlike `impl_wrap`, this always emits the full args/kwargs-parsing shim
/// and never the METH_NOARGS fast path.
pub fn impl_proto_wrap(cls: &syn::Type, spec: &FnSpec<'_>) -> TokenStream {
    let python_name = &spec.python_name;
    let cb = impl_call(cls, &spec);
    let body = impl_arg_params(&spec, cb);
    let borrow_self = spec.borrow_self();
    quote! {
        #[allow(unused_mut)]
        unsafe extern "C" fn __wrap(
            _slf: *mut pyo3::ffi::PyObject,
            _args: *mut pyo3::ffi::PyObject,
            _kwargs: *mut pyo3::ffi::PyObject) -> *mut pyo3::ffi::PyObject
        {
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _slf = _py.from_borrowed_ptr::<pyo3::PyCell<#cls>>(_slf);
            #borrow_self
            let _args = _py.from_borrowed_ptr::<pyo3::types::PyTuple>(_args);
            let _kwargs: Option<&pyo3::types::PyDict> = _py.from_borrowed_ptr_or_opt(_kwargs);
            #body
            pyo3::callback::cb_obj_convert(_py, _result)
        }
    }
}
/// Generate class method wrapper (PyCFunction, PyCFunctionWithKeywords)
///
/// Emits the `tp_new` shim: parses args/kwargs, calls the user's `#[new]`
/// function, and turns the returned initializer into a freshly allocated
/// cell via `PyClassInitializer::create_cell`.
pub fn impl_wrap_new(cls: &syn::Type, spec: &FnSpec<'_>) -> TokenStream {
    let name = &spec.name;
    let python_name = &spec.python_name;
    let names: Vec<syn::Ident> = get_arg_names(&spec);
    let cb = quote! { #cls::#name(#(#names),*) };
    // The result is converted through `IntoPyNewResult` rather than
    // `IntoPyResult`, since `#[new]` may return a bare initializer.
    let body = impl_arg_params_(
        spec,
        cb,
        quote! { pyo3::derive_utils::IntoPyNewResult::into_pynew_result },
    );
    quote! {
        #[allow(unused_mut)]
        unsafe extern "C" fn __wrap(
            _cls: *mut pyo3::ffi::PyTypeObject,
            _args: *mut pyo3::ffi::PyObject,
            _kwargs: *mut pyo3::ffi::PyObject) -> *mut pyo3::ffi::PyObject
        {
            use pyo3::type_object::PyTypeInfo;
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _args = _py.from_borrowed_ptr::<pyo3::types::PyTuple>(_args);
            let _kwargs: Option<&pyo3::types::PyDict> = _py.from_borrowed_ptr_or_opt(_kwargs);
            #body
            match _result.and_then(|init| pyo3::PyClassInitializer::from(init).create_cell(_py)) {
                Ok(slf) => slf as _,
                Err(e) => e.restore_and_null(_py),
            }
        }
    }
}
/// Generate class method wrapper (PyCFunction, PyCFunctionWithKeywords)
///
/// The shim receives the class object as its first FFI argument and passes
/// it to the user function as `&_cls` (a `PyType`).
pub fn impl_wrap_class(cls: &syn::Type, spec: &FnSpec<'_>) -> TokenStream {
    let name = &spec.name;
    let python_name = &spec.python_name;
    let names: Vec<syn::Ident> = get_arg_names(&spec);
    let cb = quote! { #cls::#name(&_cls, #(#names),*) };
    let body = impl_arg_params(spec, cb);
    quote! {
        #[allow(unused_mut)]
        unsafe extern "C" fn __wrap(
            _cls: *mut pyo3::ffi::PyObject,
            _args: *mut pyo3::ffi::PyObject,
            _kwargs: *mut pyo3::ffi::PyObject) -> *mut pyo3::ffi::PyObject
        {
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _cls = pyo3::types::PyType::from_type_ptr(_py, _cls as *mut pyo3::ffi::PyTypeObject);
            let _args = _py.from_borrowed_ptr::<pyo3::types::PyTuple>(_args);
            let _kwargs: Option<&pyo3::types::PyDict> = _py.from_borrowed_ptr_or_opt(_kwargs);
            #body
            pyo3::callback::cb_obj_convert(_py, _result)
        }
    }
}
/// Generate static method wrapper (PyCFunction, PyCFunctionWithKeywords)
///
/// The `_slf` FFI argument is ignored; the user function is called as a
/// plain associated function with the parsed arguments.
pub fn impl_wrap_static(cls: &syn::Type, spec: &FnSpec<'_>) -> TokenStream {
    let name = &spec.name;
    let python_name = &spec.python_name;
    let names: Vec<syn::Ident> = get_arg_names(&spec);
    let cb = quote! { #cls::#name(#(#names),*) };
    let body = impl_arg_params(spec, cb);
    quote! {
        #[allow(unused_mut)]
        unsafe extern "C" fn __wrap(
            _slf: *mut pyo3::ffi::PyObject,
            _args: *mut pyo3::ffi::PyObject,
            _kwargs: *mut pyo3::ffi::PyObject) -> *mut pyo3::ffi::PyObject
        {
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _args = _py.from_borrowed_ptr::<pyo3::types::PyTuple>(_args);
            let _kwargs: Option<&pyo3::types::PyDict> = _py.from_borrowed_ptr_or_opt(_kwargs);
            #body
            pyo3::callback::cb_obj_convert(_py, _result)
        }
    }
}
/// Build the call expression for a `#[getter]` method.
///
/// A getter may take at most one optional leading `pyo3::Python` argument;
/// any additional argument is rejected with a spanned error.
fn impl_call_getter(spec: &FnSpec) -> syn::Result<TokenStream> {
    let (py_arg, args) = split_off_python_arg(&spec.args);
    if let Some(extra) = args.first() {
        return Err(syn::Error::new_spanned(
            extra.ty,
            "Getter function can only have one argument of type pyo3::Python",
        ));
    }
    let name = &spec.name;
    Ok(match py_arg {
        Some(_) => quote! { _slf.#name(_py) },
        None => quote! { _slf.#name() },
    })
}
/// Generate a function wrapper called `__wrap` for a property getter
///
/// For a field descriptor the value is read through `GetPropertyValue`; for
/// a `#[getter]` method the call built by `impl_call_getter` is used. The
/// shim immutably borrows the `PyCell` receiver before evaluating.
pub(crate) fn impl_wrap_getter(
    cls: &syn::Type,
    property_type: PropertyType,
) -> syn::Result<TokenStream> {
    let (python_name, getter_impl) = match property_type {
        PropertyType::Descriptor(field) => {
            let name = field.ident.as_ref().unwrap();
            (
                name.unraw(),
                quote!({
                    use pyo3::derive_utils::GetPropertyValue;
                    (&_slf.#name).get_property_value(_py)
                }),
            )
        }
        PropertyType::Function(spec) => (spec.python_name.clone(), impl_call_getter(&spec)?),
    };
    // Getters only need a shared borrow of the cell.
    let borrow_self = crate::utils::borrow_self(false, true);
    Ok(quote! {
        unsafe extern "C" fn __wrap(
            _slf: *mut pyo3::ffi::PyObject, _: *mut ::std::os::raw::c_void) -> *mut pyo3::ffi::PyObject
        {
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _slf = _py.from_borrowed_ptr::<pyo3::PyCell<#cls>>(_slf);
            #borrow_self
            let result = pyo3::derive_utils::IntoPyResult::into_py_result(#getter_impl);
            match result {
                Ok(val) => pyo3::IntoPyPointer::into_ptr(pyo3::IntoPy::<PyObject>::into_py(val, _py)),
                Err(e) => e.restore_and_null(_py),
            }
        }
    })
}
fn impl_call_setter(spec: &FnSpec) -> syn::Result<TokenStream> {
let (py_arg, args) = split_off_python_arg(&spec.args);
if args.is_empty() {
return Err(syn::Error::new_spanned(
&spec.name,
"Setter function expected to have one argument",
));
} else if args.len() > 1 {
return Err(syn::Error::new_spanned(
&args[1].ty,
"Setter function can have at most two arguments: one of pyo3::Python, and one other",
));
}
let name = &spec.name;
let fncall = if py_arg.is_some() {
quote!(pyo3::derive_utils::IntoPyResult::into_py_result(_slf.#name(_py, _val)))
} else {
quote!(pyo3::derive_utils::IntoPyResult::into_py_result(_slf.#name(_val)))
};
Ok(fncall)
}
/// Generate a function wrapper called `__wrap` for a property setter
///
/// For a field descriptor the extracted value is assigned directly; for a
/// `#[setter]` method the call built by `impl_call_setter` is used. The shim
/// mutably borrows the `PyCell` receiver and returns 0 on success, -1 with a
/// raised exception on failure.
pub(crate) fn impl_wrap_setter(
    cls: &syn::Type,
    property_type: PropertyType,
) -> syn::Result<TokenStream> {
    let (python_name, setter_impl) = match property_type {
        PropertyType::Descriptor(field) => {
            let name = field.ident.as_ref().unwrap();
            (name.unraw(), quote!({ _slf.#name = _val; Ok(()) }))
        }
        PropertyType::Function(spec) => (spec.python_name.clone(), impl_call_setter(&spec)?),
    };
    // Setters need an exclusive borrow of the cell.
    let borrow_self = crate::utils::borrow_self(true, false);
    Ok(quote! {
        #[allow(unused_mut)]
        unsafe extern "C" fn __wrap(
            _slf: *mut pyo3::ffi::PyObject,
            _value: *mut pyo3::ffi::PyObject, _: *mut ::std::os::raw::c_void) -> pyo3::libc::c_int
        {
            const _LOCATION: &'static str = concat!(stringify!(#cls),".",stringify!(#python_name),"()");
            let _py = pyo3::Python::assume_gil_acquired();
            let _pool = pyo3::GILPool::new(_py);
            let _slf = _py.from_borrowed_ptr::<pyo3::PyCell<#cls>>(_slf);
            #borrow_self
            let _value = _py.from_borrowed_ptr(_value);
            let _result = match pyo3::FromPyObject::extract(_value) {
                Ok(_val) => {
                    #setter_impl
                }
                Err(e) => Err(e)
            };
            match _result {
                Ok(_) => 0,
                Err(e) => e.restore_and_minus1(_py),
            }
        }
    })
}
/// Produce the synthesized identifiers `arg0 .. argN-1`, one per declared
/// argument in `spec`; the generated wrappers bind extracted values to them.
pub fn get_arg_names(spec: &FnSpec) -> Vec<syn::Ident> {
    let mut names = Vec::with_capacity(spec.args.len());
    for pos in 0..spec.args.len() {
        names.push(syn::Ident::new(&format!("arg{}", pos), Span::call_site()));
    }
    names
}
/// Build the plain instance-method call `_slf.<name>(arg0, arg1, ...)`.
fn impl_call(_cls: &syn::Type, spec: &FnSpec<'_>) -> TokenStream {
    let method = &spec.name;
    let args = get_arg_names(spec);
    quote! { _slf.#method(#(#args),*) }
}
/// Render a `bool` as the identifier `true` or `false`, for interpolation
/// into generated token streams.
fn bool_to_ident(condition: bool) -> syn::Ident {
    let text = if condition { "true" } else { "false" };
    syn::Ident::new(text, Span::call_site())
}
/// Emit the argument-parsing prologue around `body`.
///
/// For a zero-argument function this reduces to wrapping `body` with the
/// `into_result` conversion. Otherwise it builds a `ParamDescription` table
/// (skipping `py`, `*args` and `**kwargs` parameters), parses the incoming
/// tuple/dict into the `output` slots via `parse_fn_args`, converts each
/// slot into the declared argument type, and finally evaluates `body`.
fn impl_arg_params_(spec: &FnSpec<'_>, body: TokenStream, into_result: TokenStream) -> TokenStream {
    if spec.args.is_empty() {
        return quote! {
            let _result = {
                #into_result (#body)
            };
        };
    }
    let mut params = Vec::new();
    for arg in spec.args.iter() {
        // `py`, `*args` and `**kwargs` are filled in separately rather than
        // parsed through the positional/keyword tables.
        if arg.py || spec.is_args(&arg.name) || spec.is_kwargs(&arg.name) {
            continue;
        }
        let name = arg.name;
        let kwonly = bool_to_ident(spec.is_kw_only(&arg.name));
        let opt = bool_to_ident(arg.optional.is_some() || spec.default_value(&arg.name).is_some());
        params.push(quote! {
            pyo3::derive_utils::ParamDescription {
                name: stringify!(#name),
                is_optional: #opt,
                kw_only: #kwonly
            }
        });
    }
    let mut param_conversion = Vec::new();
    let mut option_pos = 0;
    for (idx, arg) in spec.args.iter().enumerate() {
        param_conversion.push(impl_arg_param(&arg, &spec, idx, &mut option_pos));
    }
    let accept_args = bool_to_ident(spec.accept_args());
    let accept_kwargs = bool_to_ident(spec.accept_kwargs());
    let num_normal_params = params.len();
    // create array of arguments, and then parse
    quote! {
        use pyo3::ObjectProtocol;
        const PARAMS: &'static [pyo3::derive_utils::ParamDescription] = &[
            #(#params),*
        ];
        let mut output = [None; #num_normal_params];
        let mut _args = _args;
        let mut _kwargs = _kwargs;
        // Workaround to use the question mark operator without rewriting everything
        let _result = (|| {
            let (_args, _kwargs) = pyo3::derive_utils::parse_fn_args(
                Some(_LOCATION),
                PARAMS,
                _args,
                _kwargs,
                #accept_args,
                #accept_kwargs,
                &mut output
            )?;
            #(#param_conversion)*
            #into_result(#body)
        })();
    }
}
/// Wrap `body` with argument-parsing code, converting the call result via
/// the standard `IntoPyResult::into_py_result` conversion.
pub fn impl_arg_params(spec: &FnSpec<'_>, body: TokenStream) -> TokenStream {
    let into_result = quote! { pyo3::derive_utils::IntoPyResult::into_py_result };
    impl_arg_params_(spec, body, into_result)
}
/// Re option_pos: The option slice doesn't contain the py: Python argument, so the argument
/// index and the index in option diverge when using py: Python
///
/// Emits the `let argN = ...;` binding that converts one parsed argument
/// slot into the Rust type the method expects, handling the GIL token,
/// `*args`, `**kwargs`, optional arguments, default values and reference
/// targets.
fn impl_arg_param(
    arg: &FnArg<'_>,
    spec: &FnSpec<'_>,
    idx: usize,
    option_pos: &mut usize,
) -> TokenStream {
    let arg_name = syn::Ident::new(&format!("arg{}", idx), Span::call_site());
    // The GIL token argument is simply the ambient `_py`.
    if arg.py {
        return quote! {
            let #arg_name = _py;
        };
    }
    let ty = arg.ty;
    let name = arg.name;
    if spec.is_args(&name) {
        // `*args`: extract directly from the remaining tuple.
        return quote! {
            let #arg_name = <#ty as pyo3::FromPyObject>::extract(_args.as_ref())?;
        };
    } else if spec.is_kwargs(&name) {
        // `**kwargs`: pass the raw optional dict through unchanged.
        return quote! {
            let #arg_name = _kwargs;
        };
    }
    let arg_value = quote!(output[#option_pos]);
    *option_pos += 1;
    return if let Some(ty) = arg.optional.as_ref() {
        // A literal default of `None` means "no substitution"; any other
        // default expression is used when the argument is absent or `None`.
        let default = if let Some(d) = spec.default_value(name).filter(|d| d.to_string() != "None")
        {
            quote! { Some(#d) }
        } else {
            quote! { None }
        };
        if let syn::Type::Reference(tref) = ty {
            let (tref, mut_) = tref_preprocess(tref);
            let as_deref = if mut_.is_some() {
                quote! { as_deref_mut }
            } else {
                quote! { as_deref }
            };
            // Get Option<&T> from Option<PyRef<T>>
            quote! {
                let #mut_ _tmp = match #arg_value.as_ref().filter(|obj| !obj.is_none()) {
                    Some(_obj) => {
                        Some(_obj.extract::<<#tref as pyo3::derive_utils::ExtractExt>::Target>()?)
                    },
                    None => #default,
                };
                let #arg_name = _tmp.#as_deref();
            }
        } else {
            quote! {
                let #arg_name = match #arg_value.as_ref().filter(|obj| !obj.is_none()) {
                    Some(_obj) => Some(_obj.extract()?),
                    None => #default,
                };
            }
        }
    } else if let Some(default) = spec.default_value(name) {
        quote! {
            let #arg_name = match #arg_value.as_ref().filter(|obj| !obj.is_none()) {
                Some(_obj) => _obj.extract()?,
                None => #default,
            };
        }
    } else if let syn::Type::Reference(tref) = arg.ty {
        let (tref, mut_) = tref_preprocess(tref);
        // Get &T from PyRef<T>
        quote! {
            let #mut_ _tmp: <#tref as pyo3::derive_utils::ExtractExt>::Target
                = #arg_value.unwrap().extract()?;
            let #arg_name = &#mut_ *_tmp;
        }
    } else {
        quote! {
            let #arg_name = #arg_value.unwrap().extract()?;
        }
    };

    // Strip the lifetime from a reference type and capture its mutability.
    fn tref_preprocess(tref: &syn::TypeReference) -> (syn::TypeReference, Option<syn::token::Mut>) {
        let mut tref = tref.to_owned();
        tref.lifetime = None;
        let mut_ = tref.mutability;
        (tref, mut_)
    }
}
/// Build the `PyMethodDefType::Method` entry for an instance method.
///
/// Zero-argument methods register as `PyCFunction` with `METH_NOARGS`;
/// everything else as `PyCFunctionWithKeywords` with
/// `METH_VARARGS | METH_KEYWORDS`.
pub fn impl_py_method_def(spec: &FnSpec, wrapper: &TokenStream) -> TokenStream {
    let python_name = &spec.python_name;
    let doc = &spec.doc;
    let (ml_meth, ml_flags) = if spec.args.is_empty() {
        (
            quote!(pyo3::class::PyMethodType::PyCFunction(__wrap)),
            quote!(pyo3::ffi::METH_NOARGS),
        )
    } else {
        (
            quote!(pyo3::class::PyMethodType::PyCFunctionWithKeywords(__wrap)),
            quote!(pyo3::ffi::METH_VARARGS | pyo3::ffi::METH_KEYWORDS),
        )
    };
    quote! {
        pyo3::class::PyMethodDefType::Method({
            #wrapper
            pyo3::class::PyMethodDef {
                ml_name: stringify!(#python_name),
                ml_meth: #ml_meth,
                ml_flags: #ml_flags,
                ml_doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::New` entry (constructor) around `__wrap`.
pub fn impl_py_method_def_new(spec: &FnSpec, wrapper: &TokenStream) -> TokenStream {
    let python_name = &spec.python_name;
    let doc = &spec.doc;
    quote! {
        pyo3::class::PyMethodDefType::New({
            #wrapper
            pyo3::class::PyMethodDef {
                ml_name: stringify!(#python_name),
                ml_meth: pyo3::class::PyMethodType::PyNewFunc(__wrap),
                ml_flags: pyo3::ffi::METH_VARARGS | pyo3::ffi::METH_KEYWORDS,
                ml_doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::Class` entry (classmethod, METH_CLASS) around `__wrap`.
pub fn impl_py_method_def_class(spec: &FnSpec, wrapper: &TokenStream) -> TokenStream {
    let python_name = &spec.python_name;
    let doc = &spec.doc;
    quote! {
        pyo3::class::PyMethodDefType::Class({
            #wrapper
            pyo3::class::PyMethodDef {
                ml_name: stringify!(#python_name),
                ml_meth: pyo3::class::PyMethodType::PyCFunctionWithKeywords(__wrap),
                ml_flags: pyo3::ffi::METH_VARARGS | pyo3::ffi::METH_KEYWORDS |
                    pyo3::ffi::METH_CLASS,
                ml_doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::Static` entry (staticmethod, METH_STATIC) around `__wrap`.
pub fn impl_py_method_def_static(spec: &FnSpec, wrapper: &TokenStream) -> TokenStream {
    let python_name = &spec.python_name;
    let doc = &spec.doc;
    quote! {
        pyo3::class::PyMethodDefType::Static({
            #wrapper
            pyo3::class::PyMethodDef {
                ml_name: stringify!(#python_name),
                ml_meth: pyo3::class::PyMethodType::PyCFunctionWithKeywords(__wrap),
                ml_flags: pyo3::ffi::METH_VARARGS | pyo3::ffi::METH_KEYWORDS | pyo3::ffi::METH_STATIC,
                ml_doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::Call` entry (`__call__`) around `__wrap`.
pub fn impl_py_method_def_call(spec: &FnSpec, wrapper: &TokenStream) -> TokenStream {
    let python_name = &spec.python_name;
    let doc = &spec.doc;
    quote! {
        pyo3::class::PyMethodDefType::Call({
            #wrapper
            pyo3::class::PyMethodDef {
                ml_name: stringify!(#python_name),
                ml_meth: pyo3::class::PyMethodType::PyCFunctionWithKeywords(__wrap),
                ml_flags: pyo3::ffi::METH_VARARGS | pyo3::ffi::METH_KEYWORDS,
                ml_doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::Setter` entry wrapping the generated `__wrap` shim.
pub(crate) fn impl_py_setter_def(
    python_name: &syn::Ident,
    doc: &syn::LitStr,
    wrapper: &TokenStream,
) -> TokenStream {
    quote! {
        pyo3::class::PyMethodDefType::Setter({
            #wrapper
            pyo3::class::PySetterDef {
                name: stringify!(#python_name),
                meth: __wrap,
                doc: #doc,
            }
        })
    }
}
/// Build the `PyMethodDefType::Getter` entry wrapping the generated `__wrap` shim.
pub(crate) fn impl_py_getter_def(
    python_name: &syn::Ident,
    doc: &syn::LitStr,
    wrapper: &TokenStream,
) -> TokenStream {
    quote! {
        pyo3::class::PyMethodDefType::Getter({
            #wrapper
            pyo3::class::PyGetterDef {
                name: stringify!(#python_name),
                meth: __wrap,
                doc: #doc,
            }
        })
    }
}
/// Split a leading `pyo3::Python` argument off the front of `args`, if present.
fn split_off_python_arg<'a>(args: &'a [FnArg<'a>]) -> (Option<&FnArg>, &[FnArg]) {
    if let [first, rest @ ..] = args {
        if utils::if_type_is_python(&first.ty) {
            return (Some(first), rest);
        }
    }
    (None, args)
}
| 33.63786 | 104 | 0.547141 |
dd07594b7db653c72c077d5d66f027d79093ff0e | 11,478 | //! Compile, validate, instantiate, serialize, and destroy modules.
use crate::{
error::{update_last_error, CApiError},
export::wasmer_import_export_kind,
import::{wasmer_import_object_t, wasmer_import_t, CAPIImportObject},
instance::{wasmer_instance_t, CAPIInstance},
wasmer_byte_array, wasmer_result_t,
};
use libc::c_int;
use std::collections::HashMap;
use std::ptr::NonNull;
use std::slice;
use wasmer::{Exports, Extern, Function, Global, ImportObject, Instance, Memory, Module, Table};
/// Opaque pointer target representing a compiled WebAssembly module.
#[repr(C)]
pub struct wasmer_module_t;
/// Opaque pointer target representing a module serialized to a byte buffer.
#[repr(C)]
pub struct wasmer_serialized_module_t;
/// Creates a new Module from the given wasm bytes.
///
/// Returns `wasmer_result_t::WASMER_OK` upon success.
///
/// Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
/// and `wasmer_last_error_message` to get an error message.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_compile(
    module: *mut *mut wasmer_module_t,
    wasm_bytes: *mut u8,
    wasm_bytes_len: u32,
) -> wasmer_result_t {
    // View the caller's buffer as a slice; the caller guarantees its validity.
    let wasm: &[u8] = slice::from_raw_parts_mut(wasm_bytes, wasm_bytes_len as usize);
    let store = crate::get_global_store();
    let compiled = match Module::from_binary(store, wasm) {
        Ok(m) => m,
        Err(error) => {
            update_last_error(error);
            return wasmer_result_t::WASMER_ERROR;
        }
    };
    // Ownership of the module passes to the caller as an opaque pointer.
    *module = Box::into_raw(Box::new(compiled)) as *mut wasmer_module_t;
    wasmer_result_t::WASMER_OK
}
/// Validates a sequence of bytes hoping it represents a valid WebAssembly module.
///
/// The function returns true if the bytes are valid, false otherwise.
///
/// Example:
///
/// ```c
/// bool result = wasmer_validate(bytes, bytes_length);
///
/// if (false == result) {
///     // Do something…
/// }
/// ```
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_validate(wasm_bytes: *const u8, wasm_bytes_len: u32) -> bool {
    // A null buffer is never valid; bail out before building a slice from it.
    if wasm_bytes.is_null() {
        return false;
    }
    let store = crate::get_global_store();
    let bytes = slice::from_raw_parts(wasm_bytes, wasm_bytes_len as usize);
    Module::validate(store, bytes).is_ok()
}
/// Creates a new Instance from the given module and imports.
///
/// Returns `wasmer_result_t::WASMER_OK` upon success.
///
/// Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
/// and `wasmer_last_error_message` to get an error message.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_module_instantiate(
    module: *const wasmer_module_t,
    instance: *mut *mut wasmer_instance_t,
    imports: *mut wasmer_import_t,
    imports_len: c_int,
) -> wasmer_result_t {
    let imports: &[wasmer_import_t] = slice::from_raw_parts(imports, imports_len as usize);
    let mut imported_memories = vec![];
    let mut import_object = ImportObject::new();
    // Imports are grouped by module name into namespaces before registration.
    let mut namespaces = HashMap::new();
    for import in imports {
        let module_name = slice::from_raw_parts(
            import.module_name.bytes,
            import.module_name.bytes_len as usize,
        );
        // Both names must be valid UTF-8; otherwise report and abort.
        let module_name = if let Ok(s) = std::str::from_utf8(module_name) {
            s
        } else {
            update_last_error(CApiError {
                msg: "error converting module name to string".to_string(),
            });
            return wasmer_result_t::WASMER_ERROR;
        };
        let import_name = slice::from_raw_parts(
            import.import_name.bytes,
            import.import_name.bytes_len as usize,
        );
        let import_name = if let Ok(s) = std::str::from_utf8(import_name) {
            s
        } else {
            update_last_error(CApiError {
                msg: "error converting import_name to string".to_string(),
            });
            return wasmer_result_t::WASMER_ERROR;
        };
        let namespace = namespaces.entry(module_name).or_insert_with(Exports::new);
        let export = match import.tag {
            wasmer_import_export_kind::WASM_MEMORY => {
                let mem = import.value.memory as *mut Memory;
                // Remember imported memories so the instance can track them.
                imported_memories.push(mem);
                Extern::Memory((&*mem).clone())
            }
            wasmer_import_export_kind::WASM_FUNCTION => {
                let func_export = import.value.func as *mut Function;
                Extern::Function((&*func_export).clone())
            }
            wasmer_import_export_kind::WASM_GLOBAL => {
                let global = import.value.global as *mut Global;
                Extern::Global((&*global).clone())
            }
            wasmer_import_export_kind::WASM_TABLE => {
                let table = import.value.table as *mut Table;
                Extern::Table((&*table).clone())
            }
        };
        namespace.insert(import_name, export);
    }
    for (module_name, namespace) in namespaces.into_iter() {
        import_object.register(module_name, namespace);
    }
    let module = &*(module as *const Module);
    let new_instance = match Instance::new(module, &import_object) {
        Ok(instance) => instance,
        Err(error) => {
            update_last_error(error);
            return wasmer_result_t::WASMER_ERROR;
        }
    };
    let c_api_instance = CAPIInstance {
        instance: new_instance,
        imported_memories,
        ctx_data: None,
    };
    // Ownership of the instance passes to the caller as an opaque pointer.
    *instance = Box::into_raw(Box::new(c_api_instance)) as *mut wasmer_instance_t;
    wasmer_result_t::WASMER_OK
}
/// Given:
/// * A prepared `wasmer` import-object
/// * A compiled wasmer module
///
/// Instantiates a wasmer instance
#[no_mangle]
pub unsafe extern "C" fn wasmer_module_import_instantiate(
    instance: *mut *mut wasmer_instance_t,
    module: *const wasmer_module_t,
    import_object: *const wasmer_import_object_t,
) -> wasmer_result_t {
    // mutable to mutate through `instance_pointers_to_update` to make host functions work
    let import_object: &mut CAPIImportObject = &mut *(import_object as *mut CAPIImportObject);
    let module: &Module = &*(module as *const Module);
    let new_instance: Instance = match Instance::new(module, &import_object.import_object) {
        Ok(instance) => instance,
        Err(error) => {
            update_last_error(error);
            return wasmer_result_t::WASMER_ERROR;
        }
    };
    let c_api_instance = CAPIInstance {
        instance: new_instance,
        imported_memories: import_object.imported_memories.clone(),
        ctx_data: None,
    };
    let c_api_instance_pointer = Box::into_raw(Box::new(c_api_instance));
    // Back-patch host-function trampolines with the freshly created instance
    // pointer so their context resolves correctly at call time.
    for to_update in import_object.instance_pointers_to_update.iter_mut() {
        to_update.as_mut().instance_ptr = Some(NonNull::new_unchecked(c_api_instance_pointer));
    }
    *instance = c_api_instance_pointer as *mut wasmer_instance_t;
    return wasmer_result_t::WASMER_OK;
}
/// Serialize the given Module.
///
/// The caller owns the object and should call `wasmer_serialized_module_destroy` to free it.
///
/// Returns `wasmer_result_t::WASMER_OK` upon success.
///
/// Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
/// and `wasmer_last_error_message` to get an error message.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_module_serialize(
    serialized_module_out: *mut *mut wasmer_serialized_module_t,
    module: *const wasmer_module_t,
) -> wasmer_result_t {
    let module = &*(module as *const Module);
    match module.serialize() {
        Ok(serialized_module) => {
            // Hand the bytes to the caller as an owned, boxed slice behind an
            // opaque pointer.
            let boxed_slice = serialized_module.into_boxed_slice();
            *serialized_module_out = Box::into_raw(Box::new(boxed_slice)) as _;
            wasmer_result_t::WASMER_OK
        }
        Err(_) => {
            update_last_error(CApiError {
                msg: "Failed to serialize the module".to_string(),
            });
            wasmer_result_t::WASMER_ERROR
        }
    }
}
/// Get bytes of the serialized module.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_serialized_module_bytes(
    serialized_module: *const wasmer_serialized_module_t,
) -> wasmer_byte_array {
    // The opaque pointer actually points at a `&[u8]` fat pointer; copy it out.
    let bytes: &[u8] = *(serialized_module as *const &[u8]);
    wasmer_byte_array {
        bytes: bytes.as_ptr(),
        bytes_len: bytes.len() as u32,
    }
}
/// Transform a sequence of bytes into a serialized module.
///
/// The caller owns the object and should call `wasmer_serialized_module_destroy` to free it.
///
/// Returns `wasmer_result_t::WASMER_OK` upon success.
///
/// Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
/// and `wasmer_last_error_message` to get an error message.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_serialized_module_from_bytes(
    serialized_module: *mut *mut wasmer_serialized_module_t,
    serialized_module_bytes: *const u8,
    serialized_module_bytes_length: u32,
) -> wasmer_result_t {
    // Validate both pointers before dereferencing: the out-pointer we write
    // through, and the byte buffer (building a slice from a null pointer with
    // `slice::from_raw_parts` is undefined behavior).
    if serialized_module.is_null() {
        update_last_error(CApiError {
            // The original message named the wrong argument here.
            msg: "`serialized_module` pointer is null".to_string(),
        });
        return wasmer_result_t::WASMER_ERROR;
    }
    if serialized_module_bytes.is_null() {
        update_last_error(CApiError {
            msg: "`serialized_module_bytes` pointer is null".to_string(),
        });
        return wasmer_result_t::WASMER_ERROR;
    }
    let serialized_module_bytes: &[u8] = slice::from_raw_parts(
        serialized_module_bytes,
        serialized_module_bytes_length as usize,
    );
    *serialized_module = Box::into_raw(Box::new(serialized_module_bytes)) as _;
    wasmer_result_t::WASMER_OK
}
/// Deserialize the given serialized module.
///
/// Returns `wasmer_result_t::WASMER_OK` upon success.
///
/// Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
/// and `wasmer_last_error_message` to get an error message.
#[allow(dead_code, unused_variables)]
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub unsafe extern "C" fn wasmer_module_deserialize(
    module: *mut *mut wasmer_module_t,
    serialized_module: Option<&wasmer_serialized_module_t>,
) -> wasmer_result_t {
    // `Option<&_>` lets FFI null map to `None` for the null check.
    let serialized_module: &[u8] = if let Some(sm) = serialized_module {
        &*(sm as *const wasmer_serialized_module_t as *const &[u8])
    } else {
        update_last_error(CApiError {
            msg: "`serialized_module` pointer is null".to_string(),
        });
        return wasmer_result_t::WASMER_ERROR;
    };
    let store = crate::get_global_store();
    match Module::deserialize(store, serialized_module) {
        Ok(deserialized_module) => {
            // Ownership of the module passes to the caller.
            *module = Box::into_raw(Box::new(deserialized_module)) as _;
            wasmer_result_t::WASMER_OK
        }
        Err(e) => {
            update_last_error(CApiError { msg: e.to_string() });
            wasmer_result_t::WASMER_ERROR
        }
    }
}
/// Frees memory for the given serialized Module.
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub extern "C" fn wasmer_serialized_module_destroy(
    serialized_module: *mut wasmer_serialized_module_t,
) {
    // TODO(mark): review all serialized logic memory logic
    if serialized_module.is_null() {
        return;
    }
    // SAFETY: non-null pointers handed out by this API came from Box::into_raw.
    unsafe { drop(Box::from_raw(serialized_module as *mut &[u8])) };
}
/// Frees memory for the given Module
#[allow(clippy::cast_ptr_alignment)]
#[no_mangle]
pub extern "C" fn wasmer_module_destroy(module: *mut wasmer_module_t) {
    if module.is_null() {
        return;
    }
    // SAFETY: non-null module pointers are created via Box::into_raw in this API.
    unsafe { drop(Box::from_raw(module as *mut Module)) };
}
| 34.365269 | 95 | 0.66658 |
9cc27b84835377bf020a15b6d87fc084d9094af8 | 1,613 | use flunion::vecmath::vector2::Vector2;
use flunion::{constants::*, step, Particle};
use tetra::{
graphics::{
self,
mesh::{Mesh, ShapeStyle},
Color, DrawParams,
},
math::Vec2,
Context, ContextBuilder, State,
};
/// Seed the particle field: lay particles on a regular grid with spacing `*D`
/// covering the box from `INITMIN` to `INITMAX` (inclusive bounds).
fn init() -> Vec<Particle> {
    let mut v = Vec::new();
    let mut y = INITMIN[1];
    while y <= INITMAX[1] {
        let mut x = INITMIN[0];
        while x <= INITMAX[0] {
            // Debug trace of each seeded position.
            println!("{}, {}", x, y);
            v.push(Particle::new(Vector2::new(x, y)));
            x += *D;
        }
        y += *D;
    }
    v
}
/// Top-level game state: the set of simulated fluid particles.
struct GameState {
    particles: Vec<Particle>,
}
impl GameState {
    /// Build the initial state; the unused context is required by tetra's API.
    fn new(_ctx: &mut Context) -> tetra::Result<Self> {
        Ok(Self { particles: init() })
    }
}
impl State for GameState {
    /// Advance the simulation one step and show the current FPS in the title.
    fn update(&mut self, ctx: &mut Context) -> tetra::Result {
        step(&mut self.particles);
        tetra::window::set_title(ctx, format!("fluid fps:{:.02}", tetra::time::get_fps(ctx)));
        Ok(())
    }

    /// Render each particle as a circle outline, mapping simulation space
    /// onto the 800x800 window with the y axis flipped (screen y grows down).
    fn draw(&mut self, ctx: &mut Context) -> tetra::Result {
        graphics::clear(ctx, Color::BLACK);
        // World-to-screen scale is loop-invariant; compute it once instead of
        // once per particle.
        let scale = 800.0 / 20.0;
        for p in &self.particles {
            let center = Vec2::new(
                p.position[0] as f32 * scale,
                800.0 - p.position[1] as f32 * scale,
            );
            let circle = Mesh::circle(ctx, ShapeStyle::Stroke(1.0), center, 20.0)?;
            circle.draw(ctx, DrawParams::new());
        }
        Ok(())
    }
}
/// Open an 800x800 window titled "fluid" and run the simulation loop.
fn main() -> tetra::Result {
    ContextBuilder::new("fluid", 800, 800)
        .build()?
        .run(GameState::new)
}
| 22.71831 | 94 | 0.50155 |
db1ceaad96a468d9eb054a3f308faa970d77eff3 | 439 | use cosmwasm_std::StdError;
use thiserror::Error;
#[derive(Error, Debug, PartialEq)]
pub enum ContractError {
    /// Error bubbled up from cosmwasm's standard library.
    #[error("{0}")]
    Std(#[from] StdError),
    /// The sender is not allowed to perform this action.
    #[error("Unauthorized")]
    Unauthorized {},
    /// The token id has already been claimed.
    #[error("token_id already claimed")]
    Claimed {},
    /// The approval being set is already expired.
    #[error("Cannot set approval that is already expired")]
    Expired {},
    /// No approval entry exists for the given spender.
    #[error("Approval not found for: {spender}")]
    ApprovalNotFound { spender: String },
}
| 20.904762 | 59 | 0.633257 |
14eddc35a862b458ebd99c1e9f7f79baf122a39a | 10,024 | use std::io::{BufRead, Write};
use quick_xml::events::attributes::Attributes;
use quick_xml::events::{BytesEnd, BytesStart, BytesText, Event};
use quick_xml::Error as XmlError;
use quick_xml::Reader;
use quick_xml::Writer;
use crate::error::Error;
use crate::fromxml::FromXml;
use crate::toxml::ToXml;
use crate::util::{atom_text, atom_xhtml};
/// Represents the content of an Atom entry
///
/// All fields are optional; `Content::default()` yields an element with
/// every field unset.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[derive(Debug, Default, Clone, PartialEq)]
#[cfg_attr(feature = "builders", derive(Builder))]
#[cfg_attr(
    feature = "builders",
    builder(
        setter(into),
        default,
        build_fn(name = "build_impl", private, error = "never::Never")
    )
)]
pub struct Content {
    /// Base URL for resolving any relative references found in the element.
    pub base: Option<String>,
    /// Indicates the natural language for the element.
    pub lang: Option<String>,
    /// The text value of the content.
    pub value: Option<String>,
    /// The URI of where the content can be found.
    pub src: Option<String>,
    /// Either "text", "html", "xhtml", or the MIME type of the content.
    pub content_type: Option<String>,
}
impl Content {
/// Return base URL of the content.
pub fn base(&self) -> Option<&str> {
    self.base.as_ref().map(String::as_str)
}
/// Set base URL of the content.
///
/// Pass `None` to clear a previously set value.
pub fn set_base<V>(&mut self, base: V)
where
    V: Into<Option<String>>,
{
    self.base = base.into();
}
/// Return natural language of the content.
pub fn lang(&self) -> Option<&str> {
self.lang.as_deref()
}
/// Set the base URL of the content.
pub fn set_lang<V>(&mut self, lang: V)
where
V: Into<Option<String>>,
{
self.lang = lang.into();
}
/// Return the text value of the content.
///
/// If the `content_type` is neither `"text"`, `"html"`, or `"xhtml"` then the value should
/// be a base64 encoded document of the indicated MIME type.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_value("Example content".to_string());
/// assert_eq!(content.value(), Some("Example content"));
/// ```
pub fn value(&self) -> Option<&str> {
self.value.as_deref()
}
/// Set the text value of the content.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_value("Example content".to_string());
/// ```
pub fn set_value<V>(&mut self, value: V)
where
V: Into<Option<String>>,
{
self.value = value.into();
}
/// Return the URI where the content can be found.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_src("http://example.com/content.html".to_string());
/// assert_eq!(content.src(), Some("http://example.com/content.html"));
/// ```
pub fn src(&self) -> Option<&str> {
self.src.as_deref()
}
/// Set the URI where the content can be found.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_src("http://example.com/content.html".to_string());
/// ```
pub fn set_src<V>(&mut self, src: V)
where
V: Into<Option<String>>,
{
self.src = src.into();
}
/// Return the type of the content.
///
/// The type is either `"text"`, `"html"`, `"xhtml"`, or the MIME type of the content.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_content_type("image/png".to_string());
/// assert_eq!(content.content_type(), Some("image/png"));
/// ```
pub fn content_type(&self) -> Option<&str> {
self.content_type.as_deref()
}
/// Set the type of the content.
///
/// # Examples
///
/// ```
/// use atom_syndication::Content;
///
/// let mut content = Content::default();
/// content.set_content_type("image/png".to_string());
/// assert_eq!(content.content_type(), Some("image/png"));
/// ```
pub fn set_content_type<V>(&mut self, content_type: V)
where
V: Into<Option<String>>,
{
self.content_type = content_type.into();
}
}
impl FromXml for Content {
    /// Parses a `<content>` element: first the recognized attributes, then
    /// the element body as either embedded XHTML or plain/escaped text.
    fn from_xml<B: BufRead>(
        reader: &mut Reader<B>,
        mut atts: Attributes<'_>,
    ) -> Result<Self, Error> {
        let mut content = Content::default();
        // `with_checks(false)` skips attribute well-formedness validation;
        // unrecognized attributes are silently ignored.
        for att in atts.with_checks(false).flatten() {
            match att.key {
                b"xml:base" => content.base = Some(att.unescape_and_decode_value(reader)?),
                b"xml:lang" => content.lang = Some(att.unescape_and_decode_value(reader)?),
                b"type" => content.content_type = Some(att.unescape_and_decode_value(reader)?),
                b"src" => content.src = Some(att.unescape_and_decode_value(reader)?),
                _ => {}
            }
        }
        // `type="xhtml"` bodies are raw XML markup; everything else is read
        // as (unescaped) text.
        content.value = match content.content_type {
            Some(ref t) if t == "xhtml" => atom_xhtml(reader)?,
            _ => atom_text(reader)?,
        };
        Ok(content)
    }
}
impl ToXml for Content {
    /// Serializes this content as a `<content>` element with its attributes,
    /// writing the value either pre-escaped (xhtml) or escaped on the fly.
    fn to_xml<W: Write>(&self, writer: &mut Writer<W>) -> Result<(), XmlError> {
        let name = b"content";
        let mut element = BytesStart::borrowed(name, name.len());
        if let Some(ref base) = self.base {
            element.push_attribute(("xml:base", base.as_str()));
        }
        if let Some(ref lang) = self.lang {
            element.push_attribute(("xml:lang", lang.as_str()));
        }
        if let Some(ref content_type) = self.content_type {
            if content_type == "xhtml" {
                element.push_attribute(("type", "xhtml"));
            } else {
                element.push_attribute(("type", &**content_type));
            }
        }
        if let Some(ref src) = self.src {
            element.push_attribute(("src", &**src));
        }
        writer.write_event(Event::Start(element))?;
        if let Some(ref value) = self.value {
            writer.write_event(Event::Text(
                // xhtml values already contain markup, so they are written
                // as-is (`from_escaped`); any other value is escaped here.
                if self.content_type.as_deref() == Some("xhtml") {
                    BytesText::from_escaped(value.as_bytes())
                } else {
                    BytesText::from_plain(value.as_bytes())
                },
            ))?;
        }
        writer.write_event(Event::End(BytesEnd::borrowed(name)))?;
        Ok(())
    }
}
#[cfg(feature = "builders")]
impl ContentBuilder {
    /// Builds a new `Content`.
    ///
    /// Infallible: the derived builder's error type is `never::Never`,
    /// so the `unwrap()` can never panic.
    pub fn build(&self) -> Content {
        self.build_impl().unwrap()
    }
}
#[cfg(test)]
mod test {
use super::*;
use crate::error::Error;
fn lines(text: &str) -> Vec<&str> {
text.lines()
.map(|line| line.trim())
.filter(|line| !line.is_empty())
.collect::<Vec<_>>()
}
fn to_xml(content: &Content) -> String {
let mut buffer = Vec::new();
let mut writer = Writer::new_with_indent(&mut buffer, b' ', 4);
content.to_xml(&mut writer).unwrap();
String::from_utf8(buffer).unwrap()
}
fn from_xml(xml: &str) -> Result<Content, Error> {
let mut reader = Reader::from_reader(xml.as_bytes());
reader.expand_empty_elements(true);
loop {
let mut buf = Vec::new();
match reader.read_event(&mut buf)? {
Event::Start(element) => {
if element.name() == b"content" {
let content = Content::from_xml(&mut reader, element.attributes())?;
return Ok(content);
} else {
return Err(Error::InvalidStartTag);
}
}
Event::Eof => return Err(Error::Eof),
_ => {}
}
}
}
#[test]
fn test_plain_text() {
let content = Content {
value: Some("Text with ampersand & <tag>.".into()),
..Default::default()
};
let xml_fragment = r#"<content>Text with ampersand & <tag>.</content>"#;
assert_eq!(to_xml(&content), xml_fragment);
assert_eq!(from_xml(xml_fragment).unwrap(), content);
}
#[test]
fn test_html() {
let content = Content {
content_type: Some("html".into()),
value: Some("Markup with ampersand, <tag>, & </closing-tag>.".into()),
..Default::default()
};
let xml_fragment = r#"<content type="html">Markup with ampersand, <tag>, & </closing-tag>.</content>"#;
assert_eq!(to_xml(&content), xml_fragment);
assert_eq!(from_xml(xml_fragment).unwrap(), content);
}
#[test]
fn test_xhtml() {
let content = Content {
content_type: Some("xhtml".into()),
value: Some(r#"<div>a line<br/>& one more</div>"#.into()),
..Default::default()
};
let xml_fragment =
r#"<content type="xhtml"><div>a line<br/>& one more</div></content>"#;
assert_eq!(to_xml(&content), xml_fragment);
assert_eq!(from_xml(xml_fragment).unwrap(), content);
}
#[test]
fn test_write_image() {
let content = Content {
content_type: Some("image/png".into()),
src: Some("http://example.com/image.png".into()),
..Default::default()
};
assert_eq!(
lines(&to_xml(&content)),
lines(
r#"
<content type="image/png" src="http://example.com/image.png">
</content>
"#
)
);
}
}
| 29.482353 | 127 | 0.534916 |
ab4870a8cb82d3eaa3548265c6969aa28ac681ee | 8,393 | use core::{fmt, panic};
use crate::{Locations, Trace};
/// Wraps a generic error value and keeps track of an error trace.
#[derive(Clone)]
pub struct Traced<E, T = Locations>
where
    // NOTE: this trait bound has to be in the struct definition, otherwise we
    // won't be allowed to use the trait bound when implementing `Traced`.
    // This is because of the restrictions of `feature(min_specialization)`
    // imparted by `#[rustc_specialization_trait]`.
    T: Trace,
{
    // The wrapped error value.
    inner: E,
    // The accumulated trace of locations the error has passed through.
    trace: T,
}
// Constructors that require a default-constructible trace.
impl<E, T> Traced<E, T>
where
    T: Trace + Default,
{
    /// Wraps the given error and starts a new trace with the caller's location.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::Traced;
    ///
    /// let x: Traced<&str> = Traced::new("Oops!");
    /// ```
    ///
    /// Showing that the trace includes the caller of `new()`:
    ///
    /// ```
    /// use std::panic::Location;
    /// use tres::{Locations, Traced};
    ///
    /// let here: &Location = Location::caller();
    /// let x: Traced<&str> = Traced::new("Oops!");
    ///
    /// let locs: &Locations = x.trace();
    /// let there: &Location = locs.0.first().unwrap();
    /// assert_eq!(there.line(), here.line() + 1);
    /// ```
    ///
    /// Using a custom trace type:
    ///
    /// ```
    /// use std::panic::Location;
    /// use tres::{Trace, Traced};
    ///
    /// #[derive(Default)]
    /// struct BangTrace(pub String);
    ///
    /// impl Trace for BangTrace {
    ///     fn trace(&mut self, _location: &'static Location) {
    ///         self.0.push('!');
    ///     }
    /// }
    ///
    /// let x: Traced<&str, BangTrace> = Traced::new("Oops!");
    /// assert_eq!(&x.trace().0, "!");
    /// ```
    #[track_caller]
    pub fn new(inner: E) -> Self {
        // `#[track_caller]` makes `Location::caller()` report the call site
        // of `new`, which becomes the first entry of the trace.
        let mut trace: T = Default::default();
        trace.trace(panic::Location::caller());
        Self { inner, trace }
    }
    /// Wraps the given error and starts a new, empty trace.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::Traced;
    ///
    /// let x: Traced<&str> = Traced::empty("Oops!");
    /// assert!(x.trace().0.is_empty());
    /// ```
    pub fn empty(inner: E) -> Self {
        Self {
            inner,
            trace: Default::default(),
        }
    }
}
// Accessors and combinators that work with any trace type.
impl<E, T> Traced<E, T>
where
    T: Trace,
{
    /// Returns a reference to the contained error value.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::Traced;
    ///
    /// let x: Traced<String> = Traced::new("Oops!".to_string());
    ///
    /// let inner: &String = x.inner();
    /// assert_eq!(inner.as_str(), "Oops!");
    /// ```
    pub fn inner(&self) -> &E {
        &self.inner
    }
    /// Returns a reference to the error trace.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::{Locations, Traced};
    ///
    /// let x: Traced<String> = Traced::new("Oops!".to_string());
    ///
    /// let trace: &Locations = x.trace();
    /// assert_eq!(trace.0.len(), 1);
    /// ```
    pub fn trace(&self) -> &T {
        &self.trace
    }
    /// Constructs a new `Traced` from an error value and a trace.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::{Locations, Traced};
    ///
    /// let error: String = "Oops!".into();
    /// let trace: Locations = Default::default();
    ///
    /// let x = Traced::from_parts(error, trace);
    /// assert_eq!(x.inner(), &"Oops!");
    /// assert_eq!(x.trace(), &Locations(vec![]));
    /// ```
    pub fn from_parts(inner: E, trace: T) -> Self {
        Self { inner, trace }
    }
    /// Returns the contained error value and error trace, consuming self.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::{Locations, Traced};
    ///
    /// let x: Traced<String> = Traced::new("Oops!".to_string());
    ///
    /// let (error, trace): (String, Locations) = x.into_parts();
    /// assert_eq!(error, "Oops!".to_string());
    /// assert_eq!(trace.0.len(), 1);
    /// ```
    pub fn into_parts(self) -> (E, T) {
        let Self { inner, trace } = self;
        (inner, trace)
    }
    /// Returns the contained error value, consuming self and discarding the trace.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::Traced;
    ///
    /// let x: Traced<String> = Traced::new("Oops!".to_string());
    ///
    /// let error: String = x.into_inner();
    /// assert_eq!(error, "Oops!".to_string());
    /// ```
    pub fn into_inner(self) -> E {
        let (inner, _trace) = self.into_parts();
        inner
    }
    /// Maps a `Traced<E, T>` to `Traced<F, T>` by applying a function
    /// to the contained error value, leaving the error trace untouched.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use tres::Traced;
    ///
    /// let x: Traced<u32> = Traced::new(42);
    /// assert_eq!(x.trace().0.len(), 1);
    ///
    /// let x: Traced<String> = x.map(|i| i.to_string());
    /// assert_eq!(x.trace().0.len(), 1);
    /// ```
    pub fn map<F, O: FnOnce(E) -> F>(self, op: O) -> Traced<F, T> {
        Traced {
            inner: op(self.inner),
            trace: self.trace,
        }
    }
}
/// The whole point. Enables tracing via `?` when used as an [`Err`] variant.
///
/// [`Err`]: crate::result::Result::Err
impl<E, T> crate::result::Trace for Traced<E, T>
where
    T: Trace,
{
    /// Appends `location` to the inner trace; called on each `?` propagation.
    #[inline]
    fn trace(&mut self, location: &'static panic::Location) {
        self.trace.trace(location);
    }
}
impl<E, T> fmt::Display for Traced<E, T>
where
    E: fmt::Display,
    T: Trace + fmt::Display,
{
    /// Renders the error followed by its trace, separated by ": ".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { inner, trace } = self;
        write!(f, "{}: {}", inner, trace)
    }
}
impl<E, T> fmt::Debug for Traced<E, T>
where
    E: fmt::Debug,
    T: Trace + fmt::Debug,
{
    /// Debug-formats as `<inner>: <trace>`, mirroring the `Display` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}: {:?}", &self.inner, &self.trace)
    }
}
/////////////////////////////////////////////////////////////////////////////
// Blanket From impls
/////////////////////////////////////////////////////////////////////////////
/// An auto trait used to determine if two types are the same.
pub auto trait NotSame {}
// Negative impl: the pair `(T, T)` is the one case that is *not* NotSame.
impl<T> !NotSame for (T, T) {}
/// An auto trait used to determine if a type is a `Traced`.
pub auto trait NotTraced {}
impl<E, T: Trace> !NotTraced for Traced<E, T> {}
// Auto traits do not apply to non-sized types (e.g., `dyn Trait`), so we have
// to manually write positive implementations of the above two traits for things
// that might contain those types.
impl<T: ?Sized> NotSame for Box<T> {}
impl<T: ?Sized> NotTraced for Box<T> {}
/// Enables `?` conversion from `Traced<E, T>` to `Traced<F, T>`, as
/// long as `F: From<E>`.
///
/// # Examples
///
/// ```
/// use tres::{Result, Result::Err, Result::Ok, Traced};
///
/// fn foo() -> Result<(), Traced<String>> {
///     Ok(bar()?)
/// }
///
/// fn bar() -> Result<(), Traced<&'static str>> {
///     Err(Traced::new("Oops!"))
/// }
///
/// let x: Traced<String> = foo().unwrap_err();
/// assert_eq!(x.inner(), "Oops!");
/// assert_eq!(x.trace().0.len(), 2);
/// ```
// The `(E, F): NotSame` bound keeps this impl from overlapping with the
// standard library's reflexive `impl From<T> for T`.
impl<E, F, T> From<Traced<E, T>> for Traced<F, T>
where
    F: From<E>,
    (E, F): NotSame,
    T: Trace,
{
    #[inline]
    fn from(source: Traced<E, T>) -> Self {
        // Convert the inner error; the existing trace is carried over intact.
        Self {
            inner: From::from(source.inner),
            trace: source.trace,
        }
    }
}
/// Enables `?` conversion from `E` to `Traced<F, T>`, as long as `F: From<E>`.
///
/// # Examples
///
/// ```
/// use tres::Traced;
///
/// fn foo() -> Result<(), Traced<String>> {
///     Ok(bar()?)
/// }
///
/// fn bar() -> Result<(), &'static str> {
///     Err("Oops!")
/// }
///
/// let x: Traced<String> = foo().unwrap_err();
/// assert_eq!(x.inner(), "Oops!");
/// ```
// The `E: NotTraced` bound keeps this impl disjoint from the
// `From<Traced<E, T>>` impl above.
impl<E, F, T> From<E> for Traced<F, T>
where
    E: NotTraced,
    F: From<E>,
    T: Trace + Default,
{
    fn from(source: E) -> Self {
        Self {
            inner: From::from(source),
            // Use default() here, as we should already be inside a `?`
            // invocation, and that will append the location for us.
            trace: Default::default(),
        }
    }
}
| 24.979167 | 83 | 0.496843 |
e584d20c6ff373a973d974867169d2e96f7a7406 | 2,014 | use crate::data::primitive::int::PrimitiveInt;
use crate::data::{ast::*, Data, Literal, MessageData, MSG};
use crate::error_format::ErrorInfo;
use crate::interpreter::interpret_scope;
use crate::interpreter::variable_handler::expr_to_literal::expr_to_literal;
use crate::parser::ExitCondition;
use std::sync::mpsc;
////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTION
////////////////////////////////////////////////////////////////////////////////
/// Interprets a `foreach` loop: evaluates `expr` to an iterable literal and
/// runs `block` once per element, binding the element to `ident` and
/// (optionally) the position to `i`. Honors `break` and other exit
/// conditions raised by the scope, and removes the loop bindings afterwards.
pub fn for_loop(
    ident: &Identifier,
    i: &Option<Identifier>,
    expr: &Expr,
    block: &Block,
    range: &RangeInterval,
    mut root: MessageData,
    data: &mut Data,
    instruction_index: Option<usize>,
    sender: &Option<mpsc::Sender<MSG>>,
) -> Result<MessageData, ErrorInfo> {
    let literal = expr_to_literal(expr, None, data, &mut root, sender)?;
    // The loop target must resolve to an array of literals.
    let array = Literal::get_value::<Vec<Literal>>(&literal.primitive).map_err(|_| ErrorInfo {
        message: "invalid Expression in foreach loop, Expression is not iterable".to_owned(),
        interval: range.start.to_owned(),
    })?;
    for (index, element) in array.iter().enumerate() {
        // Bind the current element (and, if requested, its index) as step vars.
        data.step_vars
            .insert(ident.ident.to_owned(), element.to_owned());
        if let Some(index_ident) = i {
            data.step_vars.insert(
                index_ident.ident.to_owned(),
                PrimitiveInt::get_literal(index as i64, element.interval.to_owned()),
            );
        }
        root = root + interpret_scope(block, data, instruction_index, sender)?;
        match root.exit_condition {
            // A `break` only terminates this loop; clear it so outer scopes
            // keep running.
            Some(ExitCondition::Break) => {
                root.exit_condition = None;
                break;
            }
            // Any other exit condition (e.g. a goto/end) propagates outward.
            Some(_) => break,
            None => {}
        }
    }
    // Drop the loop-scoped bindings before returning.
    data.step_vars.remove(&ident.ident);
    if let Some(index_ident) = i {
        data.step_vars.remove(&index_ident.ident);
    }
    Ok(root)
}
| 33.016393 | 89 | 0.548659 |
f57793bf319a0a2215eec2ada8e9bc73a2ea11f5 | 10,034 | use crate::net::Shutdown;
use crate::os::unix::net::SocketAddr;
use io_lifetimes::{AsFd, BorrowedFd, FromFd, IntoFd, OwnedFd};
use std::fmt;
use std::io::{self, IoSlice, IoSliceMut, Read, Write};
use std::os::unix;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::time::Duration;
/// A Unix stream socket.
///
/// This corresponds to [`std::os::unix::net::UnixStream`].
///
/// Note that this `UnixStream` has no `connect` method. To create a
/// `UnixStream`, you must first obtain a [`Dir`] containing the path, and then
/// call [`Dir::connect_unix_stream`].
///
/// [`std::os::unix::net::UnixStream`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html
/// [`Dir`]: struct.Dir.html
/// [`Dir::connect_unix_stream`]: struct.Dir.html#method.connect_unix_stream
pub struct UnixStream {
    // The wrapped std stream; every method below delegates to it.
    std: unix::net::UnixStream,
}
// Thin wrappers: each method delegates directly to the inner std stream.
impl UnixStream {
    /// Constructs a new instance of `Self` from the given
    /// `std::os::unix::net::UnixStream`.
    ///
    /// This grants access the resources the `std::os::unix::net::UnixStream`
    /// instance already has access to.
    #[inline]
    pub fn from_std(std: unix::net::UnixStream) -> Self {
        Self { std }
    }
    /// Creates an unnamed pair of connected sockets.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::pair`].
    ///
    /// TODO: should this require a capability?
    ///
    /// [`std::os::unix::net::UnixStream::pair`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.pair
    #[inline]
    pub fn pair() -> io::Result<(Self, Self)> {
        unix::net::UnixStream::pair().map(|(a, b)| (Self::from_std(a), Self::from_std(b)))
    }
    /// Creates a new independently owned handle to the underlying socket.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::try_clone`].
    ///
    /// [`std::os::unix::net::UnixStream::try_clone`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.try_clone
    #[inline]
    pub fn try_clone(&self) -> io::Result<Self> {
        let unix_stream = self.std.try_clone()?;
        Ok(Self::from_std(unix_stream))
    }
    /// Returns the socket address of the local half of this connection.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::local_addr`].
    ///
    /// [`std::os::unix::net::UnixStream::local_addr`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.local_addr
    #[inline]
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.std.local_addr()
    }
    /// Returns the socket address of the remote half of this connection.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::peer_addr`].
    ///
    /// [`std::os::unix::net::UnixStream::peer_addr`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.peer_addr
    #[inline]
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.std.peer_addr()
    }
    /// Sets the read timeout for the socket.
    ///
    /// This corresponds to
    /// [`std::os::unix::net::UnixStream::set_read_timeout`].
    ///
    /// [`std::os::unix::net::UnixStream::set_read_timeout`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_read_timeout
    #[inline]
    pub fn set_read_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
        self.std.set_read_timeout(timeout)
    }
    /// Sets the write timeout for the socket.
    ///
    /// This corresponds to
    /// [`std::os::unix::net::UnixStream::set_write_timeout`].
    ///
    /// [`std::os::unix::net::UnixStream::set_write_timeout`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_write_timeout
    #[inline]
    pub fn set_write_timeout(&self, timeout: Option<Duration>) -> io::Result<()> {
        self.std.set_write_timeout(timeout)
    }
    /// Returns the read timeout of this socket.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::read_timeout`].
    ///
    /// [`std::os::unix::net::UnixStream::read_timeout`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.read_timeout
    #[inline]
    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
        self.std.read_timeout()
    }
    /// Returns the write timeout of this socket.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::write_timeout`].
    ///
    /// [`std::os::unix::net::UnixStream::write_timeout`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.write_timeout
    #[inline]
    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
        self.std.write_timeout()
    }
    /// Moves the socket into or out of nonblocking mode.
    ///
    /// This corresponds to
    /// [`std::os::unix::net::UnixStream::set_nonblocking`].
    ///
    /// [`std::os::unix::net::UnixStream::set_nonblocking`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_nonblocking
    #[inline]
    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
        self.std.set_nonblocking(nonblocking)
    }
    /// Returns the value of the `SO_ERROR` option.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::take_error`].
    ///
    /// [`std::os::unix::net::UnixStream::take_error`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.take_error
    #[inline]
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.std.take_error()
    }
    /// Shuts down the read, write, or both halves of this connection.
    ///
    /// This corresponds to [`std::os::unix::net::UnixStream::shutdown`].
    ///
    /// [`std::os::unix::net::UnixStream::shutdown`]: https://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.shutdown
    #[inline]
    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
        self.std.shutdown(how)
    }
}
// Conversions between `UnixStream` and raw/owned file descriptors, all
// delegating to the wrapped std stream.
impl FromRawFd for UnixStream {
    // Safety contract is inherited from std's `FromRawFd`: `fd` must be an
    // owned, valid descriptor.
    #[inline]
    unsafe fn from_raw_fd(fd: RawFd) -> Self {
        Self::from_std(unix::net::UnixStream::from_raw_fd(fd))
    }
}
impl FromFd for UnixStream {
    #[inline]
    fn from_fd(fd: OwnedFd) -> Self {
        Self::from_std(unix::net::UnixStream::from_fd(fd))
    }
}
impl AsRawFd for UnixStream {
    #[inline]
    fn as_raw_fd(&self) -> RawFd {
        self.std.as_raw_fd()
    }
}
impl AsFd for UnixStream {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.std.as_fd()
    }
}
impl IntoRawFd for UnixStream {
    #[inline]
    fn into_raw_fd(self) -> RawFd {
        self.std.into_raw_fd()
    }
}
impl IntoFd for UnixStream {
    #[inline]
    fn into_fd(self) -> OwnedFd {
        self.std.into_fd()
    }
}
// `Read` by value: forwards every method (including the optional
// specializations) to the inner std stream.
impl Read for UnixStream {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.std.read(buf)
    }
    #[inline]
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut]) -> io::Result<usize> {
        self.std.read_vectored(bufs)
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        self.std.read_exact(buf)
    }
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        self.std.read_to_end(buf)
    }
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        self.std.read_to_string(buf)
    }
    // Only available when the `can_vector` cfg is set by the build script.
    #[cfg(can_vector)]
    #[inline]
    fn is_read_vectored(&self) -> bool {
        self.std.is_read_vectored()
    }
}
// `Read` by shared reference, mirroring std's `impl Read for &UnixStream`;
// forwards through `&self.std` (std implements `Read` for `&UnixStream`).
impl Read for &UnixStream {
    #[inline]
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        (&mut &self.std).read(buf)
    }
    #[inline]
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut]) -> io::Result<usize> {
        (&mut &self.std).read_vectored(bufs)
    }
    #[inline]
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        (&mut &self.std).read_exact(buf)
    }
    #[inline]
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        (&mut &self.std).read_to_end(buf)
    }
    #[inline]
    fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        (&mut &self.std).read_to_string(buf)
    }
    #[cfg(can_vector)]
    #[inline]
    fn is_read_vectored(&self) -> bool {
        self.std.is_read_vectored()
    }
}
// `Write` by value: forwards every method to the inner std stream.
impl Write for UnixStream {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.std.write(buf)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        self.std.flush()
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice]) -> io::Result<usize> {
        self.std.write_vectored(bufs)
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        self.std.write_all(buf)
    }
    #[cfg(can_vector)]
    #[inline]
    fn is_write_vectored(&self) -> bool {
        self.std.is_write_vectored()
    }
    // Only available when the `write_all_vectored` cfg is set.
    #[cfg(write_all_vectored)]
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice]) -> io::Result<()> {
        self.std.write_all_vectored(bufs)
    }
}
// `Write` by shared reference, mirroring std's `impl Write for &UnixStream`.
impl Write for &UnixStream {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        (&mut &self.std).write(buf)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        (&mut &self.std).flush()
    }
    #[inline]
    fn write_vectored(&mut self, bufs: &[IoSlice]) -> io::Result<usize> {
        (&mut &self.std).write_vectored(bufs)
    }
    #[inline]
    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
        (&mut &self.std).write_all(buf)
    }
    #[cfg(can_vector)]
    #[inline]
    fn is_write_vectored(&self) -> bool {
        self.std.is_write_vectored()
    }
    #[cfg(write_all_vectored)]
    #[inline]
    fn write_all_vectored(&mut self, bufs: &mut [IoSlice]) -> io::Result<()> {
        (&mut &self.std).write_all_vectored(bufs)
    }
}
impl fmt::Debug for UnixStream {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.std.fmt(f)
}
}
| 30.041916 | 152 | 0.602053 |
0820614ab5c810a6535ca30ec36b768ec5b72228 | 3,582 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_features)]
#![feature(box_syntax)]
// A struct whose last field may be unsized, so `&Fat<[isize]>` is a fat
// pointer (data pointer + slice length).
struct Fat<T: ?Sized> {
    f1: isize,
    f2: &'static str,
    ptr: T
}
// x is a fat pointer
// Exercises field access through a fat `&Fat<[isize]>` pointer: the slice
// length and elements plus the sized fields must all be reachable.
fn foo(x: &Fat<[isize]>) {
    // Re-borrow the unsized tail through the fat reference.
    let elements = &x.ptr;
    assert_eq!(x.ptr.len(), 3);
    assert_eq!(elements[0], 1);
    assert_eq!(x.ptr[1], 2);
    assert_eq!(x.f1, 5);
    assert_eq!(x.f2, "some str");
}
// Same checks as `foo`, but generic over the element type via `ToBar`.
fn foo2<T:ToBar>(x: &Fat<[T]>) {
    let y = &x.ptr;
    let bar = Bar;
    assert_eq!(x.ptr.len(), 3);
    assert_eq!(y[0].to_bar(), bar);
    assert_eq!(x.ptr[1].to_bar(), bar);
    assert_eq!(x.f1, 5);
    assert_eq!(x.f2, "some str");
}
// Checks a fat pointer nested inside another unsized struct.
fn foo3(x: &Fat<Fat<[isize]>>) {
    let y = &x.ptr.ptr;
    assert_eq!(x.f1, 5);
    assert_eq!(x.f2, "some str");
    assert_eq!(x.ptr.f1, 8);
    assert_eq!(x.ptr.f2, "deep str");
    assert_eq!(x.ptr.ptr.len(), 3);
    assert_eq!(y[0], 1);
    assert_eq!(x.ptr.ptr[1], 2);
}
// Zero-sized marker type used as a non-primitive slice element.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
// Trait bound used by `foo2` to call a method on generic slice elements.
trait ToBar {
    fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
    fn to_bar(&self) -> Bar {
        *self
    }
}
// Drives the checks above through every unsizing-coercion shape:
// `&Fat<[T; 3]>` -> `&Fat<[T]>`, nested coercions, and boxed forms.
pub fn main() {
    // With a vec of ints.
    let f1 : Fat<[isize; 3]> = Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
    foo(&f1);
    let f2 : &Fat<[isize; 3]> = &f1;
    foo(f2);
    let f3: &Fat<[isize]> = f2;
    foo(f3);
    let f4: &Fat<[isize]> = &f1;
    foo(f4);
    let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
    foo(f5);
    // With a vec of Bars.
    let bar = Bar;
    let f1 = Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] };
    foo2(&f1);
    let f2 = &f1;
    foo2(f2);
    let f3: &Fat<[Bar]> = f2;
    foo2(f3);
    let f4: &Fat<[Bar]> = &f1;
    foo2(f4);
    let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [bar, bar, bar] };
    foo2(f5);
    // Assignment.
    let f5: &mut Fat<[isize]> = &mut Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
    f5.ptr[1] = 34;
    assert_eq!(f5.ptr[0], 1);
    assert_eq!(f5.ptr[1], 34);
    assert_eq!(f5.ptr[2], 3);
    // Zero size vec.
    let f5: &Fat<[isize]> = &Fat { f1: 5, f2: "some str", ptr: [] };
    assert!(f5.ptr.is_empty());
    let f5: &Fat<[Bar]> = &Fat { f1: 5, f2: "some str", ptr: [] };
    assert!(f5.ptr.is_empty());
    // Deeply nested.
    let f1 = Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} };
    foo3(&f1);
    let f2 = &f1;
    foo3(f2);
    let f3: &Fat<Fat<[isize]>> = f2;
    foo3(f3);
    let f4: &Fat<Fat<[isize]>> = &f1;
    foo3(f4);
    let f5: &Fat<Fat<[isize]>> =
        &Fat { f1: 5, f2: "some str", ptr: Fat { f1: 8, f2: "deep str", ptr: [1, 2, 3]} };
    foo3(f5);
    // Box.
    let f1 = Box::new([1, 2, 3]);
    assert_eq!((*f1)[1], 2);
    let f2: Box<[isize]> = f1;
    assert_eq!((*f2)[1], 2);
    // Nested Box.
    let f1 : Box<Fat<[isize; 3]>> = box Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] };
    foo(&*f1);
    let f2 : Box<Fat<[isize]>> = f1;
    foo(&*f2);
    // FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
    let f3 : Box<Fat<[isize]>> =
        Box::<Fat<[_; 3]>>::new(Fat { f1: 5, f2: "some str", ptr: [1, 2, 3] });
    foo(&*f3);
}
| 26.533333 | 94 | 0.52038 |
1dca2bd0749da4edc8d5226f5742742ee86c9763 | 113 | #[derive(Debug)]
// State shared with the hot-reloadable plugin; holds a single counter.
pub struct BasicState {
    pub counter: i32,
}
// Convenience alias for a `cr` plugin operating on `BasicState`.
pub type BasicPlugin = cr::Plugin<BasicState>;
| 16.142857 | 46 | 0.699115 |
d60934131e8efb6299fdb098e988d1c7396df753 | 900 | use std::convert::Into;
use std::collections::BTreeMap;
use super::*;
/// A named call with a map of named arguments (JSON values or components).
#[derive(Clone, Debug, Default, PartialEq)]
pub struct FunctionCall {
    // Trimmed function name.
    identifier: String,
    // Arguments keyed by trimmed name, ordered by `BTreeMap`.
    arguments: BTreeMap<String, ArgKey>,
}
impl FunctionCall {
    /// Creates a call with the given identifier (surrounding whitespace
    /// removed) and no arguments.
    pub fn new(identifier: String) -> Self {
        let identifier = identifier.trim().to_owned();
        FunctionCall { identifier, ..Self::default() }
    }
    /// Returns the trimmed identifier of this call.
    pub fn identifier(&self) -> &str {
        self.identifier.as_str()
    }
    /// Returns the argument map, keyed by trimmed argument name.
    pub fn args(&self) -> &BTreeMap<String, ArgKey> {
        &self.arguments
    }
    /// Registers (or replaces) a JSON value argument under `key`.
    pub fn add_value_arg<SK: AsRef<str>, SV: Into<String>>(&mut self, key: SK, value: SV) {
        let name = key.as_ref().trim().to_owned();
        self.arguments.insert(name, ArgKey::Json(value.into()));
    }
    /// Registers (or replaces) a component argument under `key`.
    pub fn add_component_arg<SK: AsRef<str>, SV: Into<String>>(&mut self, key: SK, value: SV) {
        let name = key.as_ref().trim().to_owned();
        self.arguments.insert(name, ArgKey::Comp(value.into()));
    }
}
| 27.272727 | 95 | 0.623333 |
75bc8d65e26528d7da095db3566852262a3a59fb | 8,468 | use std::cmp::max;
use std::fmt::Write;
use std::time::{Duration, Instant};
use libusb::*;
use rand::prelude::*;
use usb_device::test_class;
use crate::device::*;
// Signature shared by every generated test: takes the device handles and an
// output buffer to append results to.
pub type TestFn = fn(&mut DeviceHandles, &mut String) -> ();
// Upper bound on how long a single benchmark is allowed to run.
const BENCH_TIMEOUT: Duration = Duration::from_secs(10);
// Turns a list of `fn name(dev, out) { ... }` items into a `get_tests()`
// function returning (name, function-pointer) pairs for the test runner.
macro_rules! tests {
    { $(fn $name:ident($dev:ident, $out:ident) $body:expr)* } => {
        pub fn get_tests() -> Vec<(&'static str, TestFn)> {
            let mut tests: Vec<(&'static str, TestFn)> = Vec::new();
            $(
                // Each item expands to a local fn plus a registration entry.
                fn $name($dev: &mut DeviceHandles<'_>, $out: &mut String) {
                    $body
                }
                tests.push((stringify!($name), $name));
            )*
            tests
        }
    }
}
tests! {
fn control_request(dev, _out) {
let mut rng = rand::thread_rng();
let value: u16 = rng.gen();
let index: u16 = rng.gen();
let data = random_data(rng.gen_range(0, 16));
let mut expected = [0u8; 8];
expected[0] = (0x02 as u8) << 5;
expected[1] = test_class::REQ_STORE_REQUEST;
expected[2..4].copy_from_slice(&value.to_le_bytes());
expected[4..6].copy_from_slice(&index.to_le_bytes());
expected[6..8].copy_from_slice(&(data.len() as u16).to_le_bytes());
assert_eq!(
dev.write_control(
request_type(Direction::Out, RequestType::Vendor, Recipient::Device),
test_class::REQ_STORE_REQUEST, value, index,
&data, TIMEOUT).expect("control write"),
data.len());
let mut response = [0u8; 8];
assert_eq!(
dev.read_control(
request_type(Direction::In, RequestType::Vendor, Recipient::Device),
test_class::REQ_READ_BUFFER, 0, 0,
&mut response, TIMEOUT).expect("control read"),
response.len());
assert_eq!(&response, &expected);
}
fn control_data(dev, _out) {
for len in &[0, 7, 8, 9, 15, 16, 17] {
let data = random_data(*len);
assert_eq!(
dev.write_control(
request_type(Direction::Out, RequestType::Vendor, Recipient::Device),
test_class::REQ_WRITE_BUFFER, 0, 0,
&data, TIMEOUT).expect(&format!("control write len {}", len)),
data.len());
let mut response = vec![0u8; *len];
assert_eq!(
dev.read_control(
request_type(Direction::In, RequestType::Vendor, Recipient::Device),
test_class::REQ_READ_BUFFER, 0, 0,
&mut response, TIMEOUT).expect(&format!("control read len {}", len)),
data.len());
assert_eq!(&response, &data);
}
}
fn control_data_static(dev, _out) {
let mut response = [0u8; 257];
assert_eq!(
dev.read_control(
request_type(Direction::In, RequestType::Vendor, Recipient::Device),
test_class::REQ_READ_LONG_DATA, 0, 0,
&mut response, TIMEOUT).expect("control read"),
response.len());
assert_eq!(&response[..], test_class::LONG_DATA);
}
fn control_error(dev, _out) {
let res = dev.write_control(
request_type(Direction::Out, RequestType::Vendor, Recipient::Device),
test_class::REQ_UNKNOWN, 0, 0,
&[], TIMEOUT);
if res.is_ok() {
panic!("unknown control request succeeded");
}
}
    // Verifies the standard product / manufacturer / serial-number string
    // descriptors, plus a custom string at descriptor index 4, all read
    // using the en-US language id cached on the device handle.
    fn string_descriptors(dev, _out) {
        assert_eq!(
            dev.read_product_string(dev.en_us, &dev.device_descriptor, TIMEOUT)
                .expect("read product string"),
            test_class::PRODUCT);
        assert_eq!(
            dev.read_manufacturer_string(dev.en_us, &dev.device_descriptor, TIMEOUT)
                .expect("read manufacturer string"),
            test_class::MANUFACTURER);
        assert_eq!(
            dev.read_serial_number_string(dev.en_us, &dev.device_descriptor, TIMEOUT)
                .expect("read serial number string"),
            test_class::SERIAL_NUMBER);
        assert_eq!(
            dev.read_string_descriptor(dev.en_us, 4, TIMEOUT)
                .expect("read custom string"),
            test_class::CUSTOM_STRING);
    }
    // Inspects interface 0's descriptors: the default alt setting must be
    // vendor class 0xff / subclass 0x00 with no description string, while
    // alt setting 1 is subclass 0x01 and its description string must read
    // back as INTERFACE_STRING.
    fn interface_descriptor(dev, _out) {
        let iface = dev.config_descriptor
            .interfaces()
            .find(|i| i.number() == 0)
            .expect("interface not found");
        let default_alt_setting = iface.descriptors()
            .find(|i| i.setting_number() == 0)
            .expect("default alt setting not found");
        assert_eq!(default_alt_setting.description_string_index(), None);
        assert_eq!(default_alt_setting.class_code(), 0xff);
        assert_eq!(default_alt_setting.sub_class_code(), 0x00);
        let second_alt_setting = iface.descriptors()
            .find(|i| i.setting_number() == 1)
            .expect("second alt setting not found");
        assert_eq!(second_alt_setting.class_code(), 0xff);
        assert_eq!(second_alt_setting.sub_class_code(), 0x01);
        let string_index = second_alt_setting.description_string_index()
            .expect("second alt setting string is undefined");
        assert_eq!(
            dev.read_string_descriptor(dev.en_us, string_index, TIMEOUT)
                .expect("read interface string"),
            test_class::INTERFACE_STRING);
    }
    // Bulk endpoint loopback over lengths around the 64-byte full-speed
    // bulk packet size. Exact multiples of 64 additionally require a
    // zero-length packet so the host knows the transfer has ended.
    fn bulk_loopback(dev, _out) {
        for len in &[0, 1, 2, 32, 63, 64, 65, 127, 128, 129] {
            let data = random_data(*len);
            assert_eq!(
                dev.write_bulk(0x01, &data, TIMEOUT)
                    .expect(&format!("bulk write len {}", len)),
                data.len(),
                "bulk write len {}", len);
            if *len > 0 && *len % 64 == 0 {
                // Terminate packet-size-multiple transfers with a ZLP.
                assert_eq!(
                    dev.write_bulk(0x01, &[], TIMEOUT)
                        .expect(&format!("bulk write zero-length packet")),
                    0,
                    "bulk write zero-length packet");
            }
            // Prevent libusb from instantaneously reading an empty packet on Windows when
            // zero-sized buffer is passed.
            let mut response = vec![0u8; max(*len, 1)];
            assert_eq!(
                dev.read_bulk(0x81, &mut response, TIMEOUT)
                    .expect(&format!("bulk read len {}", len)),
                data.len(),
                "bulk read len {}", len);
            assert_eq!(&response[..*len], &data[..]);
        }
    }
    // Interrupt endpoint loopback for payloads up to the endpoint's packet
    // size; writes random data to EP 0x02 and reads it back from EP 0x82.
    fn interrupt_loopback(dev, _out) {
        for len in &[0, 1, 2, 15, 31] {
            let data = random_data(*len);
            assert_eq!(
                dev.write_interrupt(0x02, &data, TIMEOUT)
                    .expect(&format!("interrupt write len {}", len)),
                data.len(),
                "interrupt write len {}", len);
            // Prevent libusb from instantaneously reading an empty packet on Windows when
            // zero-sized buffer is passed.
            let mut response = vec![0u8; max(*len, 1)];
            assert_eq!(
                dev.read_interrupt(0x82, &mut response, TIMEOUT)
                    .expect(&format!("interrupt read len {}", len)),
                data.len(),
                "interrupt read len {}", len);
            assert_eq!(&response[..*len], &data[..]);
        }
    }
    // Throughput benchmark for host-to-device bulk writes; the actual
    // timing and reporting are done by run_bench.
    fn bench_bulk_write(dev, out) {
        run_bench(dev, out, |data| {
            assert_eq!(
                dev.write_bulk(0x01, data, BENCH_TIMEOUT)
                    .expect("bulk write"),
                data.len(),
                "bulk write");
        });
    }
    // Throughput benchmark for device-to-host bulk reads; the actual
    // timing and reporting are done by run_bench.
    fn bench_bulk_read(dev, out) {
        run_bench(dev, out, |data| {
            assert_eq!(
                dev.read_bulk(0x81, data, BENCH_TIMEOUT)
                    .expect("bulk read"),
                data.len(),
                "bulk read");
        });
    }
}
/// Shared benchmark driver: enables the test class's bench mode, runs the
/// supplied transfer closure `TRANSFERS` times over a `TRANSFER_BYTES`
/// buffer, and appends a human-readable throughput line to `out`.
fn run_bench(dev: &DeviceHandles, out: &mut String, f: impl Fn(&mut [u8]) -> ()) {
    const TRANSFER_BYTES: usize = 64 * 1024;
    const TRANSFERS: usize = 16;
    const TOTAL_BYTES: usize = TRANSFER_BYTES * TRANSFERS;

    // Switch the device into benchmark mode before timing anything.
    dev.write_control(
        request_type(Direction::Out, RequestType::Vendor, Recipient::Device),
        test_class::REQ_SET_BENCH_ENABLED, 1, 0,
        &[], TIMEOUT).expect("enable bench mode");

    let mut payload = random_data(TRANSFER_BYTES);

    let started_at = Instant::now();
    for _ in 0..TRANSFERS {
        f(&mut payload);
    }
    let wall = started_at.elapsed();

    // Whole seconds plus the sub-second microsecond remainder, as f64.
    let seconds = wall.as_secs() as f64 + (wall.subsec_micros() as f64) * 0.000_001;
    let throughput = (TOTAL_BYTES * 8) as f64 / 1_000_000.0 / seconds;

    writeln!(
        out,
        " {} transfers of {} bytes in {:.3}s -> {:.3}Mbit/s",
        TRANSFERS,
        TRANSFER_BYTES,
        seconds,
        throughput).expect("write failed");
}
/// Produces `len` bytes of fresh pseudo-random data for loopback payloads.
fn random_data(len: usize) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.resize(len, 0u8);
    rand::thread_rng().fill(buf.as_mut_slice());
    buf
}
| 30.242857 | 90 | 0.576051 |
67e546da52889da6babd41ddfa8ce71ae050032a | 581 | //! `crypto_stream_salsa2012` (Salsa20/12), a particular cipher specified in
//! [Cryptography in NaCl](http://nacl.cr.yp.to/valid.html), Section 7. This
//! cipher is conjectured to meet the standard notion of unpredictability.
use ffi::{crypto_stream_salsa2012,
          crypto_stream_salsa2012_xor,
          crypto_stream_salsa2012_KEYBYTES,
          crypto_stream_salsa2012_NONCEBYTES};
// Instantiate the crate's shared `stream_module!` macro against the
// libsodium Salsa20/12 FFI symbols; presumably this generates the usual
// Key/Nonce types and stream/stream_xor wrappers — see the macro definition.
stream_module!(crypto_stream_salsa2012,
               crypto_stream_salsa2012_xor,
               crypto_stream_salsa2012_KEYBYTES,
               crypto_stream_salsa2012_NONCEBYTES);
| 44.692308 | 77 | 0.729776 |
e2e450184ff077f5e79403fa0bddd1251c9fe475 | 2,980 | use raytracing::{write_color, Color, Point3, Ray, Vec3};
/// Solves the ray/sphere intersection quadratic.
///
/// Returns the smallest parameter `t` such that `P(t) = A + t*b` lies on
/// the sphere, or `-1.0` when the ray does not hit the sphere at all.
fn ray_hit_sphere_value(r: Ray, center: Point3, radius: f64) -> f64 {
    // With oc = A - C the intersection condition expands to the quadratic
    // a*t^2 + 2*half_b*t + c = 0 in t.
    let oc = r.origin() - center;
    let a = r.direction().length_squared();
    let half_b = Vec3::dot(r.direction(), oc);
    let c = oc.length_squared() - (radius * radius);

    // Negative discriminant: the ray misses the sphere entirely.
    // Zero: tangential hit. Positive: the ray passes through it.
    let discriminant = (half_b * half_b) - (a * c);
    if discriminant < 0.0 {
        -1.0
    } else {
        // Of the two real roots, the smaller t is the nearer hit point.
        (-half_b - discriminant.sqrt()) / a
    }
}
// Shades a ray: if it hits the hard-coded unit-test sphere, color by the
// surface normal; otherwise blend the background vertically.
fn ray_color(r: Ray) -> Color {
    // Hard-coded scene: one sphere half a unit in radius, one unit in
    // front of the camera along -z.
    let sphere_center = Point3::new(0.0, 0.0, -1.0);
    let sphere_radius = 0.5;
    let t = ray_hit_sphere_value(r, sphere_center, sphere_radius);
    if t > 0.0 {
        // surface normal unit vector at where the ray hit the sphere
        let surface_normal = Vec3::unit(r.at(t) - sphere_center);
        // map the xyz components from -1..1 to 0..1 rgb
        return 0.5
            * Color::new(
                surface_normal.x() + 1.0,
                surface_normal.y() + 1.0,
                surface_normal.z() + 1.0,
            );
    }
    let unit_direction = Vec3::unit(r.direction());
    // y value is now -1 <= y <= 1. Transform so 0 <= t <= 1
    let t = 0.5 * (unit_direction.y() + 1.0);
    // Linear blend between white (t = 0) and Color::zero() (t = 1).
    // NOTE(review): the blend target is Color::zero(); the usual tutorial
    // sky gradient blends toward blue — confirm whether black is intended.
    (1.0 - t) * Color::new(1.0, 1.0, 1.0) + t * Color::zero()
}
// Renders the scene to stdout in ASCII PPM (P3) format, one ray per pixel,
// scanning top row to bottom (hence the reversed y loop).
fn main() {
    // image
    let aspect_ratio = 16.0 / 9.0;
    let image_width = 400u32;
    let image_height = (image_width as f64 / aspect_ratio) as u32;
    // camera: viewport spans [-w/2, w/2] x [-h/2, h/2] at z = -focal_length,
    // with the eye at the origin.
    let viewport_height = 2.0;
    let viewport_width = viewport_height * aspect_ratio;
    let focal_length = 1.0;
    let origin = Point3::zero();
    let horizontal = Vec3::new(viewport_width, 0.0, 0.0);
    let vertical = Vec3::new(0.0, viewport_height, 0.0);
    let lower_left_corner =
        -(horizontal / 2.0) - (vertical / 2.0) - Vec3::new(0.0, 0.0, focal_length);
    // PPM image format specifications
    println!("P3"); // colors are in ascii
    println!("{} {}", image_width, image_height);
    println!("{}", 255); // maximum color channel value
    for y in (0..image_height).rev() {
        // Progress indicator on stderr so it doesn't pollute the image data.
        eprintln!("Scanlines remaining: {}", y + 1);
        for x in 0..image_width {
            // Map the pixel to [0, 1] offsets across the viewport.
            let x_scale = x as f64 / (image_width - 1) as f64;
            let y_scale = y as f64 / (image_height - 1) as f64;
            let r = Ray::new(
                origin,
                lower_left_corner + (horizontal * x_scale) + (vertical * y_scale),
            );
            let pixel_color = ray_color(r);
            write_color(pixel_color);
        }
    }
}
| 35.903614 | 83 | 0.569799 |
3a140195d71dd1e2393153f482f36dd67d6e22f1 | 4,321 | #![allow(dead_code)]
use std::ptr;
use byteorder::{ByteOrder, BigEndian};
use syscall::io::Dma;
use syscall::error::{Result, EBADF, Error};
use super::hba::{HbaPort, HbaCmdTable, HbaCmdHeader};
use super::Disk;
const SCSI_READ_CAPACITY: u8 = 0x25;
const SCSI_READ10: u8 = 0x28;
/// An AHCI-attached ATAPI (packet-interface) device, e.g. an optical drive.
pub struct DiskATAPI {
    // Identifier assigned by the driver at enumeration time.
    id: usize,
    // The HBA port this device sits behind.
    port: &'static mut HbaPort,
    // Size reported by IDENTIFY PACKET at init (may be 0 on failure).
    size: u64,
    // Command list (32 command headers) and one command table per slot.
    clb: Dma<[HbaCmdHeader; 32]>,
    ctbas: [Dma<HbaCmdTable>; 32],
    // Received-FIS area; owned but only written by the hardware.
    _fb: Dma<[u8; 256]>,
    // Just using the same buffer size as DiskATA
    // Although the sector size is different (and varies)
    buf: Dma<[u8; 256 * 512]>
}
impl DiskATAPI {
    /// Allocates the DMA structures for one port, initializes the port with
    /// them, and issues IDENTIFY PACKET to learn the device size (falling
    /// back to 0 on failure).
    pub fn new(id: usize, port: &'static mut HbaPort) -> Result<Self> {
        let mut clb = Dma::zeroed()?;
        // One command table per each of the 32 command slots.
        let mut ctbas = [
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
            Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?, Dma::zeroed()?,
        ];
        let mut fb = Dma::zeroed()?;
        let buf = Dma::zeroed()?;
        port.init(&mut clb, &mut ctbas, &mut fb);
        let size = unsafe { port.identify_packet(&mut clb, &mut ctbas).unwrap_or(0) };
        Ok(DiskATAPI {
            id: id,
            port: port,
            size: size,
            clb: clb,
            ctbas: ctbas,
            _fb: fb,
            buf: buf
        })
    }
    /// Issues SCSI READ CAPACITY and returns (block count, block size in
    /// bytes). The response holds the *last* LBA, hence the +1.
    fn read_capacity(&mut self) -> Result<(u32, u32)> {
        // TODO: only query when needed (disk changed)
        let mut cmd = [0; 16];
        cmd[0] = SCSI_READ_CAPACITY;
        self.port.atapi_dma(&cmd, 8, &mut self.clb, &mut self.ctbas, &mut self.buf)?;
        // Instead of a count, contains number of last LBA, so add 1
        let blk_count = BigEndian::read_u32(&self.buf[0..4]) + 1;
        let blk_size = BigEndian::read_u32(&self.buf[4..8]);
        Ok((blk_count, blk_size))
    }
}
impl Disk for DiskATAPI {
    /// Identifier assigned by the driver at enumeration time.
    fn id(&self) -> usize {
        self.id
    }

    /// Total capacity in bytes, or 0 if the capacity cannot be queried
    /// (e.g. no medium present).
    fn size(&mut self) -> u64 {
        match self.read_capacity() {
            Ok((blk_count, blk_size)) => (blk_count as u64) * (blk_size as u64),
            Err(_) => 0 // XXX
        }
    }

    /// Reads `buffer.len() / block_length` blocks starting at `block`,
    /// staging the transfer through the internal 128 KiB DMA buffer.
    /// Returns the number of bytes read.
    fn read(&mut self, block: u64, buffer: &mut [u8]) -> Result<Option<usize>> {
        // TODO: Handle audio CDs, which use special READ CD command

        let blk_len = self.block_length()?;
        let sectors = buffer.len() as u32 / blk_len;

        // Builds a SCSI READ(10) command for `count` blocks starting at `block`.
        fn read10_cmd(block: u32, count: u16) -> [u8; 16] {
            let mut cmd = [0; 16];
            cmd[0] = SCSI_READ10;
            BigEndian::write_u32(&mut cmd[2..6], block as u32);
            BigEndian::write_u16(&mut cmd[7..9], count as u16);
            cmd
        }

        let mut sector = 0;
        // Number of device blocks that fit in the DMA buffer, and the
        // corresponding byte size of one full staging transfer.
        let buf_len = (256 * 512) / blk_len;
        let buf_size = buf_len * blk_len;

        // Full staging-buffer-sized chunks first.
        while sectors - sector >= buf_len {
            let cmd = read10_cmd(block as u32 + sector, buf_len as u16);
            self.port.atapi_dma(&cmd, buf_size, &mut self.clb, &mut self.ctbas, &mut self.buf)?;

            unsafe { ptr::copy(self.buf.as_ptr(), buffer.as_mut_ptr().offset(sector as isize * blk_len as isize), buf_size as usize); }

            // BUGFIX: advance by the number of blocks just read (buf_len),
            // not by the block size in bytes (blk_len). The old increment
            // overshot the request and made `sectors - sector` underflow on
            // reads larger than one staging buffer.
            sector += buf_len;
        }

        // Remaining partial chunk, if any.
        if sector < sectors {
            let cmd = read10_cmd(block as u32 + sector, (sectors - sector) as u16);
            self.port.atapi_dma(&cmd, buf_size, &mut self.clb, &mut self.ctbas, &mut self.buf)?;

            // Only the first (sectors - sector) blocks of the staging buffer
            // hold valid data; copy just those.
            unsafe { ptr::copy(self.buf.as_ptr(), buffer.as_mut_ptr().offset(sector as isize * blk_len as isize), ((sectors - sector) * blk_len) as usize); }

            sector = sectors;
        }

        Ok(Some((sector * blk_len) as usize))
    }

    fn write(&mut self, _block: u64, _buffer: &[u8]) -> Result<Option<usize>> {
        Err(Error::new(EBADF)) // TODO: Implement writing
    }

    /// Device block length in bytes, as reported by READ CAPACITY.
    fn block_length(&mut self) -> Result<u32> {
        Ok(self.read_capacity()?.1)
    }
}
| 32.984733 | 157 | 0.545244 |
0e2cafaaa8dddd08a769196a05ff8158660f3891 | 15,851 | use std::collections::{HashMap, HashSet};
use std::io::{BufReader, Error, ErrorKind, Read};
use std::process::{Command, Stdio};
use crate::connector::Connector;
use crate::source::Source;
use crate::transformer::Transformer;
use crate::types::{Column, OriginalQuery, Query};
use crate::SourceOptions;
use bson::{Bson, Document};
use dump_parser::mongodb::Archive;
/// Connection parameters for a MongoDB source; `read` shells out to
/// `mongodump` using these values (no connection is held here).
pub struct MongoDB<'a> {
    host: &'a str,
    port: u16,
    database: &'a str,
    // Credentials; authentication goes through the `admin` database.
    username: &'a str,
    password: &'a str,
}
impl<'a> MongoDB<'a> {
pub fn new(
host: &'a str,
port: u16,
database: &'a str,
username: &'a str,
password: &'a str,
) -> Self {
MongoDB {
host,
port,
database,
username,
password,
}
}
}
impl<'a> Connector for MongoDB<'a> {
    // Nothing to set up: the `mongodump` process is spawned lazily in `read`.
    fn init(&mut self) -> Result<(), Error> {
        Ok(())
    }
}
impl<'a> Source for MongoDB<'a> {
    /// Dumps the configured database by spawning `mongodump --archive`
    /// (archive streamed on stdout), pipes the stream through
    /// `read_and_transform`, and fails if the child exits non-zero.
    fn read<F: FnMut(OriginalQuery, Query)>(
        &self,
        source_options: SourceOptions,
        query_callback: F,
    ) -> Result<(), Error> {
        let s_port = self.port.to_string();
        let mut process = Command::new("mongodump")
            .args([
                "-h",
                self.host,
                "--port",
                s_port.as_str(),
                "--authenticationDatabase",
                "admin",
                "--db",
                self.database,
                "-u",
                self.username,
                "-p",
                self.password,
                "--archive", // dump to stdin
            ])
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;
        let stdout = process
            .stdout
            .take()
            .ok_or_else(|| Error::new(ErrorKind::Other, "Could not capture standard output."))?;
        let reader = BufReader::new(stdout);
        // Stream the archive through the transformation pipeline before
        // reaping the child, so large dumps are never fully buffered here.
        read_and_transform(reader, source_options, query_callback)?;
        match process.wait() {
            Ok(exit_status) => {
                if !exit_status.success() {
                    return Err(Error::new(
                        ErrorKind::Other,
                        format!("command error: {:?}", exit_status.to_string()),
                    ));
                }
            }
            Err(err) => return Err(err),
        }
        Ok(())
    }
}
/// Walks one BSON value, applying the transformer registered under `key`
/// (a dotted `db.collection.field...` path) when the value is a scalar that
/// can be transformed, and recursing through arrays and nested documents.
///
/// Array elements are addressed either positionally (`key.0`, `key.1`, ...)
/// or, when the path appears in `wildcard_keys`, with the `key.$[]` wildcard
/// so one transformer covers every element.
pub fn recursively_transform_bson(
    key: String,
    bson: Bson,
    transformers: &HashMap<String, &Box<dyn Transformer + '_>>,
    wildcard_keys: &HashSet<String>,
) -> Bson {
    let mut column;
    match bson {
        Bson::String(value) => {
            column = Column::StringValue(key.clone(), value.clone());
            column = match transformers.get(key.as_str()) {
                Some(transformer) => transformer.transform(column), // apply transformation on the column
                None => column,
            };
            Bson::String((*column.string_value().unwrap()).to_string())
        }
        Bson::Double(value) => {
            column = Column::FloatNumberValue(key.clone(), value);
            column = match transformers.get(key.as_str()) {
                Some(transformer) => transformer.transform(column), // apply transformation on the column
                None => column,
            };
            Bson::Double(*column.float_number_value().unwrap())
        }
        Bson::Array(arr) => {
            let new_arr = arr
                .iter()
                .enumerate()
                .map(|(idx, bson)| {
                    // Prefer the wildcard path when one is registered for
                    // this array; otherwise address the element by index.
                    let wildcard_key = format!("{}.$[]", key);
                    recursively_transform_bson(
                        if wildcard_keys.contains(&wildcard_key) {
                            wildcard_key
                        } else {
                            format!("{}.{}", key, idx)
                        },
                        bson.clone(),
                        transformers,
                        wildcard_keys,
                    )
                })
                .collect::<Vec<Bson>>();
            Bson::Array(new_arr)
        }
        Bson::Document(nested_doc) => Bson::Document(recursively_transform_document(
            key,
            nested_doc,
            transformers,
            wildcard_keys,
        )),
        Bson::Null => Bson::Null,
        Bson::Int32(value) => {
            column = Column::NumberValue(key.clone(), value as i128);
            column = match transformers.get(key.as_str()) {
                Some(transformer) => transformer.transform(column), // apply transformation on the column
                None => column,
            };
            Bson::Int32(column.number_value().map(|&n| n as i32).unwrap())
        }
        Bson::Int64(value) => {
            column = Column::NumberValue(key.clone(), value as i128);
            column = match transformers.get(key.as_str()) {
                Some(transformer) => transformer.transform(column), // apply transformation on the column
                None => column,
            };
            Bson::Int64(column.number_value().map(|&n| n as i64).unwrap())
        }
        // ALL OF THE NEXT TYPES ARE NOT TRANSFORMABLE (yet?)
        Bson::ObjectId(oid) => Bson::ObjectId(oid),
        Bson::Binary(bin) => Bson::Binary(bin),
        Bson::RegularExpression(regex) => Bson::RegularExpression(regex),
        Bson::Boolean(value) => Bson::Boolean(value),
        Bson::DateTime(value) => Bson::DateTime(value),
        Bson::Timestamp(value) => Bson::Timestamp(value),
        Bson::MinKey => Bson::MinKey,
        Bson::MaxKey => Bson::MaxKey,
        Bson::JavaScriptCode(jsc) => Bson::JavaScriptCode(jsc),
        Bson::JavaScriptCodeWithScope(jsc) => Bson::JavaScriptCodeWithScope(jsc),
        Bson::Symbol(symbol) => Bson::Symbol(symbol),
        Bson::Decimal128(decimal) => Bson::Decimal128(decimal),
        Bson::Undefined => Bson::Undefined,
        Bson::DbPointer(db_pointer) => Bson::DbPointer(db_pointer),
    }
}
pub fn recursively_transform_document(
prefix: String,
mut original_doc: Document,
transformers: &HashMap<String, &Box<dyn Transformer + '_>>,
wildcard_keys: &HashSet<String>,
) -> Document {
for (key, bson) in original_doc.clone() {
original_doc.insert(
key.clone(),
recursively_transform_bson(
format!("{}.{}", prefix, key),
bson,
transformers,
wildcard_keys,
),
);
}
original_doc
}
/// Collects, for every transformer, the fully-qualified prefixes of each
/// `$[]` array-wildcard segment in its column path.
///
/// For a column `a.b.$[].c.0` on `db.table` this registers `db.table.a.b.$[]`;
/// a column ending in a bare `$[]` additionally registers its full path.
/// The resulting set is consulted by `recursively_transform_bson` to decide
/// whether an array is addressed by wildcard or by element index.
pub(crate) fn find_all_keys_with_array_wildcard_op(
    transformers: &Vec<Box<dyn Transformer + '_>>,
) -> HashSet<String> {
    let mut wildcard_keys = HashSet::new();

    for transformer in transformers {
        let column_name = transformer.column_name();

        // Register a prefix key for every interior ".$[]." segment.
        let delim = ".$[].";
        let mut iter = 0;
        while let Some(idx) = column_name[iter..].find(delim) {
            let offset = delim.len();
            iter += idx + offset;
            // Slice up to (but not including) the dot that follows "$[]".
            let key = column_name[..(iter - 1)].to_string();
            wildcard_keys.insert(format!("{}.{}", transformer.database_and_table_name(), key));
        }

        // A trailing ".$[]" (no dot after it) wildcards the final array itself.
        let last_delim = ".$[]";
        if column_name[iter..].contains(last_delim) {
            wildcard_keys.insert(format!(
                "{}.{}",
                transformer.database_and_table_name(),
                column_name
            ));
        }
    }

    wildcard_keys
}
/// Consumes a `mongodump` archive stream, applies the configured
/// transformers to every document, and invokes `query_callback` once with
/// the original and the transformed archive bytes.
pub fn read_and_transform<R: Read, F: FnMut(OriginalQuery, Query)>(
    reader: BufReader<R>,
    source_options: SourceOptions,
    mut query_callback: F,
) -> Result<(), Error> {
    let transformers = source_options.transformers;
    // create a set of wildcards to be used in the transformation
    let wildcard_keys = find_all_keys_with_array_wildcard_op(transformers);
    // create a map variable with Transformer by column_name
    let mut transformer_by_db_and_table_and_column_name: HashMap<String, &Box<dyn Transformer>> =
        HashMap::with_capacity(transformers.len());
    for transformer in transformers {
        let _ = transformer_by_db_and_table_and_column_name.insert(
            transformer.database_and_table_and_column_name(),
            transformer,
        );
    }
    // init archive from reader
    let mut archive = Archive::from_reader(reader)?;
    // Keep a pristine serialization of the archive before any mutation.
    let original_query = Query(archive.clone().into_bytes()?);
    archive.alter_docs(|prefixed_collections| {
        for (prefix, collection) in prefixed_collections.to_owned() {
            let mut new_collection = vec![];
            for doc in collection {
                let new_doc = recursively_transform_document(
                    prefix.clone(), // prefix is <db_name>.<collection_name>
                    doc,
                    &transformer_by_db_and_table_and_column_name,
                    &wildcard_keys,
                );
                new_collection.push(new_doc);
            }
            prefixed_collections.insert(prefix, new_collection);
        }
    });
    let query = Query(archive.into_bytes()?);
    query_callback(original_query, query);
    Ok(())
}
#[cfg(test)]
mod tests {
    use crate::source::SourceOptions;
    use crate::transformer::random::RandomTransformer;
    use crate::Source;
    use bson::{doc, Bson};
    use std::collections::{HashMap, HashSet};
    use std::vec;
    use crate::source::mongodb::{find_all_keys_with_array_wildcard_op, MongoDB};
    use crate::transformer::transient::TransientTransformer;
    use crate::transformer::Transformer;
    use super::recursively_transform_document;
    // NOTE(review): `connect` and `list_rows` are integration tests that
    // expect a MongoDB instance listening on localhost:27017 with these
    // credentials — they will fail without one.
    fn get_mongodb() -> MongoDB<'static> {
        MongoDB::new("localhost", 27017, "test", "root", "password")
    }
    fn get_invalid_mongodb() -> MongoDB<'static> {
        MongoDB::new("localhost", 27017, "test", "root", "wrongpassword")
    }
    #[test]
    fn connect() {
        // Valid credentials: the dump must succeed.
        let p = get_mongodb();
        let t1: Box<dyn Transformer> = Box::new(TransientTransformer::default());
        let transformers = vec![t1];
        let source_options = SourceOptions {
            transformers: &transformers,
            skip_config: &vec![],
        };
        assert!(p.read(source_options, |_, _| {}).is_ok());
        // Wrong password: the dump must fail.
        let p = get_invalid_mongodb();
        let t1: Box<dyn Transformer> = Box::new(TransientTransformer::default());
        let transformers = vec![t1];
        let source_options = SourceOptions {
            transformers: &transformers,
            skip_config: &vec![],
        };
        assert!(p.read(source_options, |_, _| {}).is_err());
    }
    #[test]
    fn list_rows() {
        let p = get_mongodb();
        let t1: Box<dyn Transformer> = Box::new(TransientTransformer::default());
        let transformers = vec![t1];
        let source_options = SourceOptions {
            transformers: &transformers,
            skip_config: &vec![],
        };
        // Both the original and transformed archives must be non-empty.
        p.read(source_options, |original_query, query| {
            assert!(original_query.data().len() > 0);
            assert!(query.data().len() > 0);
        })
        .unwrap();
    }
    #[test]
    fn recursive_document_transform() {
        let database_name = "test";
        let table_name = "users";
        // One random transformer per targeted path, covering a top-level
        // field, a nested field and two array elements addressed by index.
        let columns = vec!["no_nest", "info.ext.number", "info_arr.0.a", "info_arr.1.b"];
        let doc = doc! {
            "no_nest": 5,
            "info": {
                "ext": {
                    "number": 123456789000 as i64
                }
            },
            "info_arr" : [
                { "a": "SomeString" },
                { "b": 3.5 }
            ]
        };
        // Create a vec of all transformers
        let transformers_vec = Vec::from_iter(columns.iter().map(|&c| {
            let t: Box<dyn Transformer> = Box::new(RandomTransformer::new(
                database_name,
                table_name,
                &c.to_string(),
            ));
            t
        }));
        // Create a HashMap with Transformer by db_name.table_name.column_name
        let transformers = HashMap::from_iter(
            transformers_vec
                .iter()
                .map(|t| t.database_and_table_and_column_name())
                .zip(transformers_vec.iter()),
        );
        // Recursively transform the document
        let transformed_doc = recursively_transform_document(
            "test.users".to_string(),
            doc,
            &transformers,
            &HashSet::new(),
        );
        // Assert transformed values are not equal to original values
        // no_nest
        assert_ne!(transformed_doc.get("no_nest").unwrap(), &Bson::Int32(5));
        // info.ext.number
        assert_ne!(
            transformed_doc
                .get_document("info")
                .unwrap()
                .get_document("ext")
                .unwrap()
                .get("number")
                .unwrap(),
            &Bson::Int64(1234567890)
        );
        let arr = transformed_doc.get_array("info_arr").unwrap();
        // info_arr.0.a
        let doc = arr[0].as_document().unwrap();
        assert_ne!(
            doc.get("a").unwrap(),
            &Bson::String("SomeString".to_string())
        );
        // info_arr.1.b
        let doc = arr[1].as_document().unwrap();
        assert_ne!(doc.get("b").unwrap(), &Bson::Double(3.5));
    }
    #[test]
    fn recursive_document_transform_with_wildcard_nested() {
        let database_name = "test";
        let table_name = "users";
        // The "$[]" wildcard targets every element of `a.b`, but ".c.0"
        // restricts the transform to the first element of each inner array.
        let column_name = "a.b.$[].c.0";
        let doc = doc! {
            "a": {
                "b" : [
                    {
                        "c" : [
                            1, // should be transformed
                            2 // shouldn't be transformed
                        ]
                    },
                    {
                        "c" : [
                            3, // should be transformed
                            4 // shouldn't be transformed
                        ]
                    }
                ]
            }
        };
        let t: Box<dyn Transformer> = Box::new(RandomTransformer::new(
            database_name,
            table_name,
            column_name.into(),
        ));
        let transformers_vec = vec![t];
        // create a set of wildcards to be used in the transformation
        let wildcard_keys = find_all_keys_with_array_wildcard_op(&transformers_vec);
        // create a map variable with Transformer by column_name
        let mut transformers: HashMap<String, &Box<dyn Transformer>> =
            HashMap::with_capacity(transformers_vec.len());
        for transformer in transformers_vec.iter() {
            let _ = transformers.insert(
                transformer.database_and_table_and_column_name(),
                transformer,
            );
        }
        // Recursively transform the document
        let transformed_doc = recursively_transform_document(
            "test.users".to_string(),
            doc,
            &transformers,
            &wildcard_keys,
        );
        // Assert transformed values are not equal to original values
        let arr = transformed_doc
            .get_document("a")
            .unwrap()
            .get_array("b")
            .unwrap();
        // 1, 2
        let mut doc = arr[0].as_document().unwrap();
        let mut inner_arr = doc.get_array("c").unwrap();
        assert_ne!(inner_arr[0], Bson::Int32(1));
        assert_eq!(inner_arr[1], Bson::Int32(2));
        // 3, 4
        doc = arr[1].as_document().unwrap();
        inner_arr = doc.get_array("c").unwrap();
        assert_ne!(inner_arr[0], Bson::Int32(3));
        assert_eq!(inner_arr[1], Bson::Int32(4));
    }
}
| 33.725532 | 105 | 0.525961 |
de9c8a2feafb84ab69f7eceb98f6c03031f0f569 | 68,665 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This implementation is largely based on the high-level description and analysis of B-Trees
// found in *Open Data Structures* (ODS). Although our implementation does not use any of
// the source found in ODS, if one wishes to review the high-level design of this structure, it
// can be freely downloaded at http://opendatastructures.org/. Its contents are as of this
// writing (August 2014) freely licensed under the following Creative Commons Attribution
// License: [CC BY 2.5 CA](http://creativecommons.org/licenses/by/2.5/ca/).
use self::Entry::*;
use core::cmp::Ordering;
use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::iter::{Map, FromIterator};
use core::ops::Index;
use core::{fmt, mem, usize};
use Bound::{self, Included, Excluded, Unbounded};
use borrow::Borrow;
use vec_deque::VecDeque;
use self::Continuation::{Continue, Finished};
use self::StackOp::*;
use super::node::ForceResult::{Leaf, Internal};
use super::node::TraversalItem::{self, Elem, Edge};
use super::node::{Traversal, MutTraversal, MoveTraversal};
use super::node::{self, Node, Found, GoDown};
/// A map based on a B-Tree.
///
/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
/// is done is *very* inefficient for modern computer architectures. In particular, every element
/// is stored in its own individually heap-allocated node. This means that every single insertion
/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
/// are both notably expensive things to do in practice, we are forced to at very least reconsider
/// the BST strategy.
///
/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
/// searches. However, this does mean that searches will have to do *more* comparisons on average.
/// The precise number of comparisons depends on the node search strategy used. For optimal cache
/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
/// the node using binary search. As a compromise, one could also perform a linear search
/// that initially only checks every i<sup>th</sup> element for some choice of i.
///
/// Currently, our implementation simply performs naive linear search. This provides excellent
/// performance on *small* nodes of elements which are cheap to compare. However in the future we
/// would like to further explore choosing the optimal search strategy based on the choice of B,
/// and possibly other factors. Using linear search, searching for a random element is expected
/// to take O(B log<sub>B</sub>n) comparisons, which is generally worse than a BST. In practice,
/// however, performance is excellent.
///
/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
/// any other key, as determined by the `Ord` trait, changes while it is in the map. This is
/// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code.
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BTreeMap<K, V> {
    root: Node<K, V>,
    // Number of key-value pairs stored in the map.
    length: usize,
    // Height of the tree; a map whose root is a lone leaf has depth 1.
    depth: usize,
    // Branching factor: per the module docs, nodes hold b-1 to 2b-1 elements.
    b: usize,
}
/// An abstract base over-which all other BTree iterators are built.
#[derive(Clone)]
struct AbsIter<T> {
    // In-flight traversals; NOTE(review): presumably one per tree level of
    // the current iteration frontier — confirm against the Iterator impl.
    traversals: VecDeque<T>,
    // Remaining number of elements, for exact size hints.
    size: usize,
}
/// An iterator over a BTreeMap's entries, yielding `(&K, &V)` pairs.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
    inner: AbsIter<Traversal<'a, K, V>>,
}
/// A mutable iterator over a BTreeMap's entries, yielding `(&K, &mut V)`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, K: 'a, V: 'a> {
    inner: AbsIter<MutTraversal<'a, K, V>>,
}
/// An owning iterator over a BTreeMap's entries; consumes the map and
/// yields `(K, V)` by value.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<K, V> {
    inner: AbsIter<MoveTraversal<K, V>>,
}
/// An iterator over a BTreeMap's keys; a thin `Map` adapter over `Iter`
/// that projects out the key of each entry.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
    inner: Map<Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>,
}
/// An iterator over a BTreeMap's values; a thin `Map` adapter over `Iter`
/// that projects out the value of each entry.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
    inner: Map<Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>,
}
/// An iterator over a sub-range of BTreeMap's entries.
// Not `#[stable]`: range iteration was still unstable at this point.
pub struct Range<'a, K: 'a, V: 'a> {
    inner: AbsIter<Traversal<'a, K, V>>,
}
/// A mutable iterator over a sub-range of BTreeMap's entries.
pub struct RangeMut<'a, K: 'a, V: 'a> {
    inner: AbsIter<MutTraversal<'a, K, V>>,
}
/// A view into a single entry in a map, which may either be vacant or occupied.
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Entry<'a, K: 'a, V: 'a> {
    /// A vacant Entry: the key has no value yet; inserting reuses the search.
    #[stable(feature = "rust1", since = "1.0.0")]
    Vacant(VacantEntry<'a, K, V>),
    /// An occupied Entry: the key is present and its value can be accessed.
    #[stable(feature = "rust1", since = "1.0.0")]
    Occupied(OccupiedEntry<'a, K, V>),
}
/// A vacant Entry.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
    // The key to insert under, retained until the caller supplies a value.
    key: K,
    // Search path down to the leaf edge where the insertion will happen.
    stack: stack::SearchStack<'a, K, V, node::handle::Edge, node::handle::Leaf>,
}
/// An occupied Entry.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
    // Search path down to the key-value pair found for the key.
    stack: stack::SearchStack<'a, K, V, node::handle::KV, node::handle::LeafOrInternal>,
}
impl<K: Ord, V> BTreeMap<K, V> {
    /// Makes a new empty BTreeMap with a reasonable choice for B.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[allow(deprecated)]
    pub fn new() -> BTreeMap<K, V> {
        // An empty map is a single leaf node at depth 1.
        BTreeMap {
            length: 0,
            depth: 1,
            root: Node::make_leaf_root(6),
            // FIXME(Gankro): Tune this as a function of size_of<K/V>?
            b: 6,
        }
    }
    /// Clears the map, removing all values.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut a = BTreeMap::new();
    /// a.insert(1, "a");
    /// a.clear();
    /// assert!(a.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        // avoid recursive destructors by manually traversing the tree:
        // swap in a fresh empty map and drain the old one element-by-element
        // through its owning iterator.
        for _ in mem::replace(self, BTreeMap::new()) {}
    }
// Searching in a B-Tree is pretty straightforward.
//
// Start at the root. Try to find the key in the current node. If we find it, return it.
// If it's not in there, follow the edge *before* the smallest key larger than
// the search key. If no such key exists (they're *all* smaller), then just take the last
// edge in the node. If we're in a leaf and we don't find our key, then it's not
// in the tree.
    /// Returns a reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but the ordering
    /// on the borrowed form *must* match the ordering on the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.get(&1), Some(&"a"));
    /// assert_eq!(map.get(&2), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
        where K: Borrow<Q>,
              Q: Ord
    {
        let mut cur_node = &self.root;
        loop {
            match Node::search(cur_node, key) {
                // Exact match in this node: hand back the value.
                Found(handle) => return Some(handle.into_kv().1),
                GoDown(handle) => {
                    match handle.force() {
                        // Bottomed out in a leaf without a match: absent.
                        Leaf(_) => return None,
                        // Descend into the child edge and keep searching.
                        Internal(internal_handle) => {
                            cur_node = internal_handle.into_edge();
                            continue;
                        }
                    }
                }
            }
        }
    }
    /// Returns true if the map contains a value for the specified key.
    ///
    /// The key may be any borrowed form of the map's key type, but the ordering
    /// on the borrowed form *must* match the ordering on the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.contains_key(&1), true);
    /// assert_eq!(map.contains_key(&2), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
        where K: Borrow<Q>,
              Q: Ord
    {
        // Delegates to `get`, so the search cost is identical.
        self.get(key).is_some()
    }
    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// The key may be any borrowed form of the map's key type, but the ordering
    /// on the borrowed form *must* match the ordering on the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// if let Some(x) = map.get_mut(&1) {
    ///     *x = "b";
    /// }
    /// assert_eq!(map[&1], "b");
    /// ```
    // See `get` for implementation notes, this is basically a copy-paste with mut's added
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
        where K: Borrow<Q>,
              Q: Ord
    {
        // temp_node is a Borrowck hack for having a mutable value outlive a loop iteration
        let mut temp_node = &mut self.root;
        loop {
            let cur_node = temp_node;
            match Node::search(cur_node, key) {
                // Exact match: hand back a mutable reference to the value.
                Found(handle) => return Some(handle.into_kv_mut().1),
                GoDown(handle) => {
                    match handle.force() {
                        // Leaf without a match: the key is absent.
                        Leaf(_) => return None,
                        // Descend mutably into the child edge.
                        Internal(internal_handle) => {
                            temp_node = internal_handle.into_edge_mut();
                            continue;
                        }
                    }
                }
            }
        }
    }
    // Insertion in a B-Tree is a bit complicated.
    //
    // First we do the same kind of search described in `get`. But we need to maintain a stack of
    // all the nodes/edges in our search path. If we find a match for the key we're trying to
    // insert, just swap the vals and return the old ones. However, when we bottom out in a leaf,
    // we attempt to insert our key-value pair at the same location we would want to follow another
    // edge.
    //
    // If the node has room, then this is done in the obvious way by shifting elements. However,
    // if the node itself is full, we split node into two, and give its median key-value
    // pair to its parent to insert the new node with. Of course, the parent may also be
    // full, and insertion can propagate until we reach the root. If we reach the root, and
    // it is *also* full, then we split the root and place the two nodes under a newly made root.
    //
    // Note that we subtly deviate from Open Data Structures in our implementation of split.
    // ODS describes inserting into the node *regardless* of its capacity, and then
    // splitting *afterwards* if it happens to be overfull. However, this is inefficient.
    // Instead, we split beforehand, and then insert the key-value pair into the appropriate
    // result node. This has two consequences:
    //
    // 1) While ODS produces a left node of size B-1, and a right node of size B,
    // we may potentially reverse this. However, this shouldn't affect the analysis.
    //
    // 2) While ODS may potentially return the pair we *just* inserted after
    // the split, we will never do this. Again, this shouldn't affect the analysis.
    /// Inserts a key-value pair into the map.
    ///
    /// If the map did not have this key present, `None` is returned.
    ///
    /// If the map did have this key present, the key is not updated, the
    /// value is updated and the old value is returned.
    /// See the [module-level documentation] for more.
    ///
    /// [module-level documentation]: index.html#insert-and-complex-keys
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// assert_eq!(map.insert(37, "a"), None);
    /// assert_eq!(map.is_empty(), false);
    ///
    /// map.insert(37, "b");
    /// assert_eq!(map.insert(37, "c"), Some("b"));
    /// assert_eq!(map[&37], "c");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, mut key: K, mut value: V) -> Option<V> {
        // This is a stack of rawptrs to nodes paired with indices, respectively
        // representing the nodes and edges of our search path. We have to store rawptrs
        // because as far as Rust is concerned, we can mutate aliased data with such a
        // stack. It is of course correct, but what it doesn't know is that we will only
        // be popping and using these ptrs one at a time in child-to-parent order. The alternative
        // to doing this is to take the Nodes from their parents. This actually makes
        // borrowck *really* happy and everything is pretty smooth. However, this creates
        // *tons* of pointless writes, and requires us to always walk all the way back to
        // the root after an insertion, even if we only needed to change a leaf. Therefore,
        // we accept this potential unsafety and complexity in the name of performance.
        //
        // Regardless, the actual dangerous logic is completely abstracted away from BTreeMap
        // by the stack module. All it can do is immutably read nodes, and ask the search stack
        // to proceed down some edge by index. This makes the search logic we'll be reusing in a
        // few different methods much neater, and of course drastically improves safety.
        let mut stack = stack::PartialSearchStack::new(self);
        loop {
            let result = stack.with(move |pusher, node| {
                // Same basic logic as found in `get`, but with PartialSearchStack mediating the
                // actual nodes for us
                match Node::search(node, &key) {
                    Found(mut handle) => {
                        // Perfect match, swap the values and return the old one
                        mem::swap(handle.val_mut(), &mut value);
                        Finished(Some(value))
                    }
                    GoDown(handle) => {
                        // We need to keep searching, try to get the search stack
                        // to go down further
                        match handle.force() {
                            Leaf(leaf_handle) => {
                                // We've reached a leaf, perform the insertion here
                                pusher.seal(leaf_handle).insert(key, value);
                                Finished(None)
                            }
                            Internal(internal_handle) => {
                                // We've found the subtree to insert this key/value pair in,
                                // keep searching
                                Continue((pusher.push(internal_handle), key, value))
                            }
                        }
                    }
                }
            });
            match result {
                Finished(ret) => return ret,
                Continue((new_stack, renewed_key, renewed_val)) => {
                    // The closure consumed the stack and the pair by value; take
                    // them back for the next iteration of the descent.
                    stack = new_stack;
                    key = renewed_key;
                    value = renewed_val;
                }
            }
        }
    }
    // Deletion is the most complicated operation for a B-Tree.
    //
    // First we do the same kind of search described in
    // `get`. But we need to maintain a stack of all the nodes/edges in our search path.
    // If we don't find the key, then we just return `None` and do nothing. If we do find the
    // key, we perform two operations: remove the item, and then possibly handle underflow.
    //
    // # removing the item
    // If the node is a leaf, we just remove the item, and shift
    // any items after it back to fill the hole.
    //
    // If the node is an internal node, we *swap* the item with the smallest item
    // in its right subtree (which must reside in a leaf), and then revert to the leaf
    // case
    //
    // # handling underflow
    // After removing an item, there may be too few items in the node. We want nodes
    // to be mostly full for efficiency, although we make an exception for the root, which
    // may have as few as one item. If this is the case, we may first try to steal
    // an item from our left or right neighbour.
    //
    // To steal from the left (right) neighbour,
    // we take the largest (smallest) item and child from it. We then swap the taken item
    // with the item in their mutual parent that separates them, and then insert the
    // parent's item and the taken child into the first (last) index of the underflowed node.
    //
    // However, stealing has the possibility of underflowing our neighbour. If this is the
    // case, we instead *merge* with our neighbour. This of course reduces the number of
    // children in the parent. Therefore, we also steal the item that separates the now
    // merged nodes, and insert it into the merged node.
    //
    // Merging may cause the parent to underflow. If this is the case, then we must repeat
    // the underflow handling process on the parent. If merging merges the last two children
    // of the root, then we replace the root with the merged node.
    /// Removes a key from the map, returning the value at the key if the key
    /// was previously in the map.
    ///
    /// The key may be any borrowed form of the map's key type, but the ordering
    /// on the borrowed form *must* match the ordering on the key type.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// assert_eq!(map.remove(&1), Some("a"));
    /// assert_eq!(map.remove(&1), None);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
        where K: Borrow<Q>,
              Q: Ord
    {
        // See `insert` for a more thorough description of the stuff going on in here
        let mut stack = stack::PartialSearchStack::new(self);
        loop {
            let result = stack.with(move |pusher, node| {
                match Node::search(node, key) {
                    Found(handle) => {
                        // Perfect match. Terminate the stack here, and remove the entry
                        Finished(Some(pusher.seal(handle).remove()))
                    }
                    GoDown(handle) => {
                        // We need to keep searching, try to go down the next edge
                        match handle.force() {
                            // We're at a leaf; the key isn't in here
                            Leaf(_) => Finished(None),
                            Internal(internal_handle) => Continue(pusher.push(internal_handle)),
                        }
                    }
                }
            });
            match result {
                // `SearchStack::remove` yields the (key, value) pair; only the
                // value is returned to the caller.
                Finished(ret) => return ret.map(|(_, v)| v),
                Continue(new_stack) => stack = new_stack,
            }
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> IntoIterator for BTreeMap<K, V> {
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;
    /// Gets an owning iterator over the entries of the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// map.insert(2, "b");
    /// map.insert(3, "c");
    ///
    /// for (key, value) in map.into_iter() {
    ///     println!("{}: {}", key, value);
    /// }
    /// ```
    fn into_iter(self) -> IntoIter<K, V> {
        // Record the length before the root is moved out of `self`, then seed
        // the traversal queue with an owning traversal of the root node.
        let size = self.len();
        let mut traversals = VecDeque::new();
        traversals.push_back(Traverse::traverse(self.root));
        IntoIter {
            inner: AbsIter {
                traversals: traversals,
                size: size,
            },
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> IntoIterator for &'a BTreeMap<K, V> {
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V>;
    // Delegates to `iter`, yielding shared references to the entries.
    fn into_iter(self) -> Iter<'a, K, V> {
        self.iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> IntoIterator for &'a mut BTreeMap<K, V> {
    type Item = (&'a K, &'a mut V);
    type IntoIter = IterMut<'a, K, V>;
    // Delegates to `iter_mut`; `mut self` lets the `&mut self` receiver of
    // `iter_mut` reborrow the consumed reference.
    fn into_iter(mut self) -> IterMut<'a, K, V> {
        self.iter_mut()
    }
}
/// A helper enum useful for deciding whether to continue a loop since we can't
/// return from a closure
enum Continuation<A, B> {
    // Keep looping, carrying this state into the next iteration.
    Continue(A),
    // Stop looping and yield this result.
    Finished(B),
}
/// The stack module provides a safe interface for constructing and manipulating a stack of ptrs
/// to nodes. By using this module much better safety guarantees can be made, and more search
/// boilerplate gets cut out.
mod stack {
    use core::marker;
    use core::mem;
    use core::ops::{Deref, DerefMut};
    use super::BTreeMap;
    use super::super::node::{self, Node, Fit, Split, Internal, Leaf};
    use super::super::node::handle;
    use vec::Vec;
    /// A zero-sized lifetime "brand". The `Cell` inside the `PhantomData` makes
    /// `'id` invariant, so two distinct brands can never be unified.
    struct InvariantLifetime<'id>(marker::PhantomData<::core::cell::Cell<&'id ()>>);
    impl<'id> InvariantLifetime<'id> {
        fn new() -> InvariantLifetime<'id> {
            InvariantLifetime(marker::PhantomData)
        }
    }
    /// A generic mutable reference, identical to `&mut` except for the fact that its lifetime
    /// parameter is invariant. This means that wherever an `IdRef` is expected, only an `IdRef`
    /// with the exact requested lifetime can be used. This is in contrast to normal references,
    /// where `&'static` can be used in any function expecting any lifetime reference.
    pub struct IdRef<'id, T: 'id> {
        inner: &'id mut T,
        _marker: InvariantLifetime<'id>,
    }
    impl<'id, T> Deref for IdRef<'id, T> {
        type Target = T;
        fn deref(&self) -> &T {
            &*self.inner
        }
    }
    impl<'id, T> DerefMut for IdRef<'id, T> {
        fn deref_mut(&mut self) -> &mut T {
            &mut *self.inner
        }
    }
    // One entry of a search path: a raw-pointer handle to an edge of an
    // internal node. Raw pointers are used to sidestep borrowck; see the
    // comments in `BTreeMap::insert`.
    type StackItem<K, V> = node::Handle<*mut Node<K, V>, handle::Edge, handle::Internal>;
    type Stack<K, V> = Vec<StackItem<K, V>>;
    /// A `PartialSearchStack` handles the construction of a search stack.
    pub struct PartialSearchStack<'a, K: 'a, V: 'a> {
        map: &'a mut BTreeMap<K, V>,
        stack: Stack<K, V>,
        next: *mut Node<K, V>,
    }
    /// A `SearchStack` represents a full path to an element or an edge of interest. It provides
    /// methods depending on the type of what the path points to for removing an element, inserting
    /// a new element, and manipulating the element at the top of the stack.
    pub struct SearchStack<'a, K: 'a, V: 'a, Type, NodeType> {
        map: &'a mut BTreeMap<K, V>,
        stack: Stack<K, V>,
        top: node::Handle<*mut Node<K, V>, Type, NodeType>,
    }
    /// A `PartialSearchStack` that doesn't hold a reference to the next node, and is
    /// just waiting for a `Handle` to that next node to be pushed. See `PartialSearchStack::with`
    /// for more details.
    pub struct Pusher<'id, 'a, K: 'a, V: 'a> {
        map: &'a mut BTreeMap<K, V>,
        stack: Stack<K, V>,
        _marker: InvariantLifetime<'id>,
    }
    impl<'a, K, V> PartialSearchStack<'a, K, V> {
        /// Creates a new PartialSearchStack from a BTreeMap by initializing the stack with the
        /// root of the tree.
        pub fn new(map: &'a mut BTreeMap<K, V>) -> PartialSearchStack<'a, K, V> {
            let depth = map.depth;
            PartialSearchStack {
                next: &mut map.root as *mut _,
                map: map,
                stack: Vec::with_capacity(depth),
            }
        }
        /// Breaks up the stack into a `Pusher` and the next `Node`, allowing the given closure
        /// to interact with, search, and finally push the `Node` onto the stack. The passed in
        /// closure must be polymorphic on the `'id` lifetime parameter, as this statically
        /// ensures that only `Handle`s from the correct `Node` can be pushed.
        ///
        /// The reason this works is that the `Pusher` has an `'id` parameter, and will only accept
        /// handles with the same `'id`. The closure could only get references with that lifetime
        /// through its arguments or through some other `IdRef` that it has lying around. However,
        /// no other `IdRef` could possibly work - because the `'id` is held in an invariant
        /// parameter, it would need to have precisely the correct lifetime, which would mean that
        /// at least one of the calls to `with` wouldn't be properly polymorphic, wanting a
        /// specific lifetime instead of the one that `with` chooses to give it.
        ///
        /// See also Haskell's `ST` monad, which uses a similar trick.
        pub fn with<T, F: for<'id> FnOnce(Pusher<'id, 'a, K, V>,
                                          IdRef<'id, Node<K, V>>) -> T>(self, closure: F) -> T {
            let pusher = Pusher {
                map: self.map,
                stack: self.stack,
                _marker: InvariantLifetime::new(),
            };
            let node = IdRef {
                // SAFETY: `next` was derived from a node owned by `map`, which
                // this stack borrows mutably for `'a`, so the pointer is valid
                // and the reference is unique for the closure's duration.
                inner: unsafe { &mut *self.next },
                _marker: InvariantLifetime::new(),
            };
            closure(pusher, node)
        }
    }
    impl<'id, 'a, K, V> Pusher<'id, 'a, K, V> {
        /// Pushes the requested child of the stack's current top on top of the stack. If the child
        /// exists, then a new PartialSearchStack is yielded. Otherwise, a VacantSearchStack is
        /// yielded.
        pub fn push(mut self,
                    mut edge: node::Handle<IdRef<'id, Node<K, V>>, handle::Edge, handle::Internal>)
                    -> PartialSearchStack<'a, K, V> {
            self.stack.push(edge.as_raw());
            PartialSearchStack {
                map: self.map,
                stack: self.stack,
                next: edge.edge_mut() as *mut _,
            }
        }
        /// Converts the PartialSearchStack into a SearchStack.
        pub fn seal<Type, NodeType>(self,
                                    mut handle: node::Handle<IdRef<'id, Node<K, V>>,
                                                             Type,
                                                             NodeType>)
                                    -> SearchStack<'a, K, V, Type, NodeType> {
            SearchStack {
                map: self.map,
                stack: self.stack,
                top: handle.as_raw(),
            }
        }
    }
    impl<'a, K, V, NodeType> SearchStack<'a, K, V, handle::KV, NodeType> {
        /// Gets a reference to the value the stack points to.
        pub fn peek(&self) -> &V {
            // SAFETY: `top` points into the map that `self` mutably borrows.
            unsafe { self.top.from_raw().into_kv().1 }
        }
        /// Gets a mutable reference to the value the stack points to.
        pub fn peek_mut(&mut self) -> &mut V {
            // SAFETY: as in `peek`; the `&mut self` receiver guarantees
            // exclusive access to the underlying map.
            unsafe { self.top.from_raw_mut().into_kv_mut().1 }
        }
        /// Converts the stack into a mutable reference to the value it points to, with a lifetime
        /// tied to the original tree.
        pub fn into_top(mut self) -> &'a mut V {
            // SAFETY: the raw-pointer round trip decouples the returned
            // reference from the local `self`, extending it to `'a` — the
            // lifetime for which the map itself is mutably borrowed.
            unsafe { &mut *(self.top.from_raw_mut().val_mut() as *mut V) }
        }
    }
    impl<'a, K, V> SearchStack<'a, K, V, handle::KV, handle::Leaf> {
        /// Removes the key and value in the top element of the stack, then handles underflows as
        /// described in BTree's pop function.
        fn remove_leaf(mut self) -> (K, V) {
            self.map.length -= 1;
            // Remove the key-value pair from the leaf that this search stack points to.
            // Then, note if the leaf is underfull, and promptly forget the leaf and its ptr
            // to avoid ownership issues.
            let (key_val, mut underflow) = unsafe {
                // SAFETY: `top` is the only live handle into the leaf; the map
                // is mutably borrowed by `self` for the whole operation.
                let key_val = self.top.from_raw_mut().remove_as_leaf();
                let underflow = self.top.from_raw().node().is_underfull();
                (key_val, underflow)
            };
            loop {
                match self.stack.pop() {
                    None => {
                        // We've reached the root, so no matter what, we're done. We manually
                        // access the root via the tree itself to avoid creating any dangling
                        // pointers.
                        if self.map.root.is_empty() && !self.map.root.is_leaf() {
                            // We've emptied out the root, so make its only child the new root.
                            // If it's a leaf, we just let it become empty.
                            self.map.depth -= 1;
                            self.map.root.hoist_lone_child();
                        }
                        return key_val;
                    }
                    Some(mut handle) => {
                        if underflow {
                            // Underflow! Handle it!
                            // SAFETY: handles are popped and used one at a time
                            // in child-to-parent order, so no two are live at once.
                            unsafe {
                                handle.from_raw_mut().handle_underflow();
                                underflow = handle.from_raw().node().is_underfull();
                            }
                        } else {
                            // All done!
                            return key_val;
                        }
                    }
                }
            }
        }
    }
    impl<'a, K, V> SearchStack<'a, K, V, handle::KV, handle::LeafOrInternal> {
        /// Removes the key and value in the top element of the stack, then handles underflows as
        /// described in BTree's pop function.
        pub fn remove(self) -> (K, V) {
            // Ensure that the search stack goes to a leaf. This is necessary to perform deletion
            // in a BTree. Note that this may put the tree in an inconsistent state (further
            // described in into_leaf's comments), but this is immediately fixed by the
            // removing the value we want to remove
            self.into_leaf().remove_leaf()
        }
        /// Subroutine for removal. Takes a search stack for a key that might terminate at an
        /// internal node, and mutates the tree and search stack to *make* it a search stack
        /// for that same key that *does* terminate at a leaf. If the mutation occurs, then this
        /// leaves the tree in an inconsistent state that must be repaired by the caller by
        /// removing the entry in question. Specifically the key-value pair and its successor will
        /// become swapped.
        fn into_leaf(mut self) -> SearchStack<'a, K, V, handle::KV, handle::Leaf> {
            // SAFETY (review note): relies on the stack invariant that the raw
            // handles all point into `self.map`, which is mutably borrowed for `'a`.
            unsafe {
                let mut top_raw = self.top;
                let mut top = top_raw.from_raw_mut();
                let key_ptr = top.key_mut() as *mut _;
                let val_ptr = top.val_mut() as *mut _;
                // Try to go into the right subtree of the found key to find its successor
                match top.force() {
                    Leaf(mut leaf_handle) => {
                        // We're a proper leaf stack, nothing to do
                        return SearchStack {
                            map: self.map,
                            stack: self.stack,
                            top: leaf_handle.as_raw(),
                        };
                    }
                    Internal(mut internal_handle) => {
                        let mut right_handle = internal_handle.right_edge();
                        // We're not a proper leaf stack, let's get to work.
                        self.stack.push(right_handle.as_raw());
                        let mut temp_node = right_handle.edge_mut();
                        loop {
                            // Walk into the smallest subtree of this node
                            let node = temp_node;
                            match node.kv_handle(0).force() {
                                Leaf(mut handle) => {
                                    // This node is a leaf, do the swap and return
                                    mem::swap(handle.key_mut(), &mut *key_ptr);
                                    mem::swap(handle.val_mut(), &mut *val_ptr);
                                    return SearchStack {
                                        map: self.map,
                                        stack: self.stack,
                                        top: handle.as_raw(),
                                    };
                                }
                                Internal(kv_handle) => {
                                    // This node is internal, go deeper
                                    let mut handle = kv_handle.into_left_edge();
                                    self.stack.push(handle.as_raw());
                                    temp_node = handle.into_edge_mut();
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    impl<'a, K, V> SearchStack<'a, K, V, handle::Edge, handle::Leaf> {
        /// Inserts the key and value into the top element in the stack, and if that node has to
        /// split recursively inserts the split contents into the next element stack until
        /// splits stop.
        ///
        /// Assumes that the stack represents a search path from the root to a leaf.
        ///
        /// An &mut V is returned to the inserted value, for callers that want a reference to this.
        pub fn insert(mut self, key: K, val: V) -> &'a mut V {
            // SAFETY (review note): as with removal, the raw handles are used
            // one at a time in child-to-parent order while `self` holds the
            // map's unique borrow.
            unsafe {
                self.map.length += 1;
                // Insert the key and value into the leaf at the top of the stack
                let (mut insertion, inserted_ptr) = self.top
                    .from_raw_mut()
                    .insert_as_leaf(key, val);
                loop {
                    match insertion {
                        Fit => {
                            // The last insertion went off without a hitch, no splits! We can stop
                            // inserting now.
                            return &mut *inserted_ptr;
                        }
                        Split(key, val, right) => {
                            match self.stack.pop() {
                                // The last insertion triggered a split, so get the next element on
                                // the stack to recursively insert the split node into.
                                None => {
                                    // The stack was empty; we've split the root, and need to make a
                                    // a new one. This is done in-place because we can't move the
                                    // root out of a reference to the tree.
                                    Node::make_internal_root(&mut self.map.root,
                                                             self.map.b,
                                                             key,
                                                             val,
                                                             right);
                                    self.map.depth += 1;
                                    return &mut *inserted_ptr;
                                }
                                Some(mut handle) => {
                                    // The stack wasn't empty, do the insertion and recurse
                                    insertion = handle.from_raw_mut()
                                        .insert_as_internal(key, val, right);
                                    continue;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
    /// Builds a map by inserting every pair the iterator yields; later
    /// occurrences of a key overwrite earlier ones.
    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> {
        let mut result = BTreeMap::new();
        for (key, value) in iter {
            result.insert(key, value);
        }
        result
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
    /// Inserts every pair the iterator yields, overwriting existing keys.
    #[inline]
    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
        iter.into_iter().for_each(|(key, value)| {
            self.insert(key, value);
        });
    }
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap<K, V> {
    /// Copies each referenced pair into the map (`K` and `V` are `Copy`).
    fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
        for (&key, &value) in iter {
            self.insert(key, value);
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Hash, V: Hash> Hash for BTreeMap<K, V> {
    /// Hashes every key-value pair in iteration order.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.iter().for_each(|entry| entry.hash(state));
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Default for BTreeMap<K, V> {
    // The default map is simply an empty one.
    fn default() -> BTreeMap<K, V> {
        BTreeMap::new()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> {
    /// Two maps are equal when they have the same length and their entries
    /// match pairwise in iteration order.
    fn eq(&self, other: &BTreeMap<K, V>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        self.iter().zip(other.iter()).all(|(lhs, rhs)| lhs == rhs)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// Marker impl: equality is total when both key and value types are `Eq`.
impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> {
    // Lexicographic comparison of the two maps' (key, value) sequences.
    #[inline]
    fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> {
    // Total lexicographic ordering over the (key, value) sequences.
    #[inline]
    fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering {
        self.iter().cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Debug, V: Debug> Debug for BTreeMap<K, V> {
    /// Formats the map as `{key: value, ...}` via the debug-map builder.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_map();
        for (key, value) in self.iter() {
            builder.entry(key, value);
        }
        builder.finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap<K, V>
    where K: Borrow<Q>,
          Q: Ord
{
    type Output = V;
    /// Returns a reference to the value for `key`.
    ///
    /// Panics with "no entry found for key" if the key is not present.
    #[inline]
    fn index(&self, key: &Q) -> &V {
        match self.get(key) {
            Some(value) => value,
            None => panic!("no entry found for key"),
        }
    }
}
/// Genericizes over how to get the correct type of iterator from the correct type
/// of Node ownership.
trait Traverse<N> {
    fn traverse(node: N) -> Self;
}
// Shared borrow of a node -> borrowing traversal.
impl<'a, K, V> Traverse<&'a Node<K, V>> for Traversal<'a, K, V> {
    fn traverse(node: &'a Node<K, V>) -> Traversal<'a, K, V> {
        node.iter()
    }
}
// Mutable borrow of a node -> mutable traversal.
impl<'a, K, V> Traverse<&'a mut Node<K, V>> for MutTraversal<'a, K, V> {
    fn traverse(node: &'a mut Node<K, V>) -> MutTraversal<'a, K, V> {
        node.iter_mut()
    }
}
// Owned node -> consuming traversal.
impl<K, V> Traverse<Node<K, V>> for MoveTraversal<K, V> {
    fn traverse(node: Node<K, V>) -> MoveTraversal<K, V> {
        node.into_iter()
    }
}
/// Represents an operation to perform inside the following iterator methods.
/// This is necessary to use in `next` because we want to modify `self.traversals` inside
/// a match that borrows it. Similarly in `next_back`. Instead, we use this enum to note
/// what we want to do, and do it after the match.
enum StackOp<T> {
    // Push this traversal onto the queue.
    Push(T),
    // Discard the traversal at the end currently being consumed.
    Pop,
}
impl<K, V, E, T> Iterator for AbsIter<T>
    where T: DoubleEndedIterator<Item = TraversalItem<K, V, E>> + Traverse<E>
{
    type Item = (K, V);
    // Our iterator represents a queue of all ancestors of elements we have
    // yet to yield, from smallest to largest. Note that the design of these
    // iterators permits an *arbitrary* initial pair of min and max, making
    // these arbitrary sub-range iterators.
    fn next(&mut self) -> Option<(K, V)> {
        loop {
            // We want the smallest element, so try to get the back of the queue
            let op = match self.traversals.back_mut() {
                None => return None,
                // The queue wasn't empty, so continue along the node in its head
                Some(iter) => {
                    match iter.next() {
                        // The head is empty, so Pop it off and continue the process
                        None => Pop,
                        // The head yielded an edge, so make that the new head
                        Some(Edge(next)) => Push(Traverse::traverse(next)),
                        // The head yielded an entry, so yield that
                        Some(Elem(kv)) => {
                            self.size -= 1;
                            return Some(kv);
                        }
                    }
                }
            };
            // Handle any operation as necessary, without a conflicting borrow of the queue
            match op {
                Push(item) => {
                    self.traversals.push_back(item);
                }
                Pop => {
                    self.traversals.pop_back();
                }
            }
        }
    }
    // Exact hint: `size` is decremented once per yielded element in both
    // `next` and `next_back`.
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.size, Some(self.size))
    }
}
impl<K, V, E, T> DoubleEndedIterator for AbsIter<T>
    where T: DoubleEndedIterator<Item = TraversalItem<K, V, E>> + Traverse<E>
{
    // next_back is totally symmetric to next
    #[inline]
    fn next_back(&mut self) -> Option<(K, V)> {
        loop {
            // Largest elements live at the front of the queue.
            let op = match self.traversals.front_mut() {
                None => return None,
                Some(iter) => {
                    match iter.next_back() {
                        // Traversal exhausted: drop it and continue.
                        None => Pop,
                        // An edge: descend by pushing its traversal to the front.
                        Some(Edge(next)) => Push(Traverse::traverse(next)),
                        // An entry: yield it.
                        Some(Elem(kv)) => {
                            self.size -= 1;
                            return Some(kv);
                        }
                    }
                }
            };
            // Apply the queued operation once the borrow of the queue has ended.
            match op {
                Push(item) => {
                    self.traversals.push_front(item);
                }
                Pop => {
                    self.traversals.pop_front();
                }
            }
        }
    }
}
impl<'a, K, V> Clone for Iter<'a, K, V> {
    // Cloning copies the iterator's position; the clones advance independently.
    fn clone(&self) -> Iter<'a, K, V> {
        Iter { inner: self.inner.clone() }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);
    // Straight delegation to the underlying `AbsIter`.
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> {
    fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `AbsIter::size_hint` is exact, so the remaining length is known.
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);
    // Straight delegation to the underlying `AbsIter`.
    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
    fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `AbsIter::size_hint` is exact, so the remaining length is known.
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    // Straight delegation to the underlying owning `AbsIter`.
    fn next(&mut self) -> Option<(K, V)> {
        self.inner.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
    fn next_back(&mut self) -> Option<(K, V)> {
        self.inner.next_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
// `AbsIter::size_hint` is exact, so the remaining length is known.
impl<K, V> ExactSizeIterator for IntoIter<K, V> {}
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Keys<'a, K, V> {
Keys { inner: self.inner.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
fn next(&mut self) -> Option<(&'a K)> {
self.inner.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K)> {
self.inner.next_back()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {}
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Values<'a, K, V> {
Values { inner: self.inner.clone() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
fn next(&mut self) -> Option<(&'a V)> {
self.inner.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a V)> {
self.inner.next_back()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {}
impl<'a, K, V> Clone for Range<'a, K, V> {
    // Cloning copies the iterator's position; the clones advance independently.
    fn clone(&self) -> Range<'a, K, V> {
        Range { inner: self.inner.clone() }
    }
}
impl<'a, K, V> Iterator for Range<'a, K, V> {
    type Item = (&'a K, &'a V);
    // Straight delegation to the sub-range traversal.
    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next()
    }
}
impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
    fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
        self.inner.next_back()
    }
}
impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);
    // Straight delegation to the mutable sub-range traversal.
    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next()
    }
}
impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> {
    fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.inner.next_back()
    }
}
impl<'a, K: Ord, V> Entry<'a, K, V> {
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the default if empty, and returns
    /// a mutable reference to the value in the entry.
    pub fn or_insert(self, default: V) -> &'a mut V {
        // Delegate to the closure-taking variant; `default` is moved into the
        // closure and only inserted when the entry is vacant.
        self.or_insert_with(move || default)
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    /// Ensures a value is in the entry by inserting the result of the default function if empty,
    /// and returns a mutable reference to the value in the entry.
    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
        match self {
            // Already present: the default is never evaluated.
            Occupied(occupied) => occupied.into_mut(),
            Vacant(vacant) => vacant.insert(default()),
        }
    }
}
impl<'a, K: Ord, V> VacantEntry<'a, K, V> {
    /// Sets the value of the entry with the VacantEntry's key,
    /// and returns a mutable reference to it.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(self, value: V) -> &'a mut V {
        // The search stack already points at the edge where the key belongs,
        // so this is a direct insertion with no further searching.
        self.stack.insert(self.key, value)
    }
}
impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> {
    /// Gets a reference to the value in the entry.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get(&self) -> &V {
        self.stack.peek()
    }
    /// Gets a mutable reference to the value in the entry.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn get_mut(&mut self) -> &mut V {
        self.stack.peek_mut()
    }
    /// Converts the entry into a mutable reference to its value.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_mut(self) -> &'a mut V {
        self.stack.into_top()
    }
    /// Sets the value of the entry with the OccupiedEntry's key,
    /// and returns the entry's old value.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn insert(&mut self, value: V) -> V {
        // Swap in the new value and hand the previous one back.
        mem::replace(self.stack.peek_mut(), value)
    }
    /// Takes the value of the entry out of the map, and returns it.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn remove(self) -> V {
        // The search stack's `remove` yields (key, value); the key is dropped.
        let (_, value) = self.stack.remove();
        value
    }
}
impl<K, V> BTreeMap<K, V> {
    /// Gets an iterator over the entries of the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(1, "a");
    /// map.insert(2, "b");
    /// map.insert(3, "c");
    ///
    /// for (key, value) in map.iter() {
    ///     println!("{}: {}", key, value);
    /// }
    ///
    /// let (first_key, first_value) = map.iter().next().unwrap();
    /// assert_eq!((*first_key, *first_value), (1, "a"));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<K, V> {
        // Seed the traversal queue with a borrowing traversal of the root.
        // NB. The initial capacity for ringbuf is large enough to avoid reallocs in many cases.
        let size = self.len();
        let mut traversals = VecDeque::new();
        traversals.push_back(Traverse::traverse(&self.root));
        Iter {
            inner: AbsIter {
                traversals: traversals,
                size: size,
            },
        }
    }
    /// Gets a mutable iterator over the entries of the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    ///
    /// // add 10 to the value if the key isn't "a"
    /// for (key, value) in map.iter_mut() {
    ///     if key != &"a" {
    ///         *value += 10;
    ///     }
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        // Same shape as `iter`, but seeded with a mutable traversal.
        let size = self.len();
        let mut traversals = VecDeque::new();
        traversals.push_back(Traverse::traverse(&mut self.root));
        IterMut {
            inner: AbsIter {
                traversals: traversals,
                size: size,
            },
        }
    }
    /// Gets an iterator over the keys of the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut a = BTreeMap::new();
    /// a.insert(1, "a");
    /// a.insert(2, "b");
    ///
    /// let keys: Vec<_> = a.keys().cloned().collect();
    /// assert_eq!(keys, [1, 2]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
        // Project each entry onto its key. A named function is used (rather
        // than a closure) because `Keys` stores a plain `fn` pointer.
        fn key_of<A, B>((k, _): (A, B)) -> A {
            k
        }
        let project: fn((&'a K, &'a V)) -> &'a K = key_of; // coerce to fn pointer
        Keys { inner: self.iter().map(project) }
    }
    /// Gets an iterator over the values of the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut a = BTreeMap::new();
    /// a.insert(1, "a");
    /// a.insert(2, "b");
    ///
    /// let values: Vec<&str> = a.values().cloned().collect();
    /// assert_eq!(values, ["a", "b"]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn values<'a>(&'a self) -> Values<'a, K, V> {
        // Project each entry onto its value; see `keys` for why this is a
        // named function instead of a closure.
        fn value_of<A, B>((_, v): (A, B)) -> B {
            v
        }
        let project: fn((&'a K, &'a V)) -> &'a V = value_of; // coerce to fn pointer
        Values { inner: self.iter().map(project) }
    }
    /// Returns the number of elements in the map.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut a = BTreeMap::new();
    /// assert_eq!(a.len(), 0);
    /// a.insert(1, "a");
    /// assert_eq!(a.len(), 1);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        self.length
    }
    /// Returns true if the map contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut a = BTreeMap::new();
    /// assert!(a.is_empty());
    /// a.insert(1, "a");
    /// assert!(!a.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
// Shared expansion for `range` and `range_mut`: builds the deque of partial
// traversals that `AbsIter` walks. The `$mutability` token list selects
// between the shared (`[]`) and mutable (`[mut]`) instantiations.
macro_rules! range_impl {
    ($root:expr, $min:expr, $max:expr, $as_slices_internal:ident, $iter:ident, $Range:ident,
     $edges:ident, [$($mutability:ident)*]) => (
        {
            // A deque that encodes two search paths containing (left-to-right):
            // a series of truncated-from-the-left iterators, the LCA's doubly-truncated iterator,
            // and a series of truncated-from-the-right iterators.
            let mut traversals = VecDeque::new();
            let (root, min, max) = ($root, $min, $max);
            // `leftmost`/`rightmost` track the next node to descend into on
            // each side of the range; `None` means that side is finished.
            let mut leftmost = None;
            let mut rightmost = None;
            match (&min, &max) {
                (&Unbounded, &Unbounded) => {
                    // Fully unbounded range: one traversal over the whole tree.
                    traversals.push_back(Traverse::traverse(root))
                }
                (&Unbounded, &Included(_)) | (&Unbounded, &Excluded(_)) => {
                    rightmost = Some(root);
                }
                (&Included(_), &Unbounded) | (&Excluded(_), &Unbounded) => {
                    leftmost = Some(root);
                }
                (&Included(min_key), &Included(max_key))
                | (&Included(min_key), &Excluded(max_key))
                | (&Excluded(min_key), &Included(max_key))
                | (&Excluded(min_key), &Excluded(max_key)) => {
                    // lca represents the Lowest Common Ancestor, above which we never
                    // walk, since everything else is outside the range to iterate.
                    //       ___________________
                    //      |__0_|_80_|_85_|_90_|  (root)
                    //      |    |    |    |    |
                    //           |
                    //           v
                    //  ___________________
                    // |__5_|_15_|_30_|_73_|
                    // |    |    |    |    |
                    //                |
                    //                v
                    //  ___________________
                    // |_33_|_58_|_63_|_68_|  lca for the range [41, 65]
                    // |    |\___|___/|    |  iterator at traversals[2]
                    //      |         |
                    //      |         v
                    //      v         rightmost
                    //      leftmost
                    let mut is_leaf = root.is_leaf();
                    let mut lca = root.$as_slices_internal();
                    loop {
                        let slice = lca.slice_from(min_key).slice_to(max_key);
                        if let [ref $($mutability)* edge] = slice.edges {
                            // Follow the only edge that leads the node that covers the range.
                            is_leaf = edge.is_leaf();
                            lca = edge.$as_slices_internal();
                        } else {
                            let mut iter = slice.$iter();
                            if is_leaf {
                                leftmost = None;
                                rightmost = None;
                            } else {
                                // Only change the state of nodes with edges.
                                leftmost = iter.next_edge_item();
                                rightmost = iter.next_edge_item_back();
                            }
                            traversals.push_back(iter);
                            break;
                        }
                    }
                }
            }
            // Keep narrowing the range by going down.
            //  ___________________
            // |_38_|_43_|_48_|_53_|
            // |    |____|____|____/ iterator at traversals[1]
            //      |
            //      v
            //  ___________________
            // |_39_|_40_|_41_|_42_|  (leaf, the last leftmost)
            //           \_________|  iterator at traversals[0]
            match min {
                Included(key) | Excluded(key) =>
                    while let Some(left) = leftmost {
                        let is_leaf = left.is_leaf();
                        let mut iter = left.$as_slices_internal().slice_from(key).$iter();
                        leftmost = if is_leaf {
                            None
                        } else {
                            // Only change the state of nodes with edges.
                            iter.next_edge_item()
                        };
                        traversals.push_back(iter);
                    },
                _ => {}
            }
            // If the leftmost iterator starts with an element, then it was an exact match.
            if let (Excluded(_), Some(leftmost_iter)) = (min, traversals.back_mut()) {
                // Drop this excluded element. `next_kv_item` has no effect when
                // the next item is an edge.
                leftmost_iter.next_kv_item();
            }
            // The code for the right side is similar, but pushes to the front
            // of the deque and trims with the `_back` variants.
            match max {
                Included(key) | Excluded(key) =>
                    while let Some(right) = rightmost {
                        let is_leaf = right.is_leaf();
                        let mut iter = right.$as_slices_internal().slice_to(key).$iter();
                        rightmost = if is_leaf {
                            None
                        } else {
                            iter.next_edge_item_back()
                        };
                        traversals.push_front(iter);
                    },
                _ => {}
            }
            if let (Excluded(_), Some(rightmost_iter)) = (max, traversals.front_mut()) {
                rightmost_iter.next_kv_item_back();
            }
            $Range {
                inner: AbsIter {
                    traversals: traversals,
                    // Placeholder value; marked unused below.
                    size: usize::MAX, // unused
                }
            }
        }
    )
}
impl<K: Ord, V> BTreeMap<K, V> {
    /// Constructs a double-ended iterator over a sub-range of elements in the map, starting
    /// at min, and ending at max. If min is `Unbounded`, then it will be treated as "negative
    /// infinity", and if max is `Unbounded`, then it will be treated as "positive infinity".
    /// Thus range(Unbounded, Unbounded) will yield the whole collection.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(btree_range, collections_bound)]
    ///
    /// use std::collections::BTreeMap;
    /// use std::collections::Bound::{Included, Unbounded};
    ///
    /// let mut map = BTreeMap::new();
    /// map.insert(3, "a");
    /// map.insert(5, "b");
    /// map.insert(8, "c");
    /// for (&key, &value) in map.range(Included(&4), Included(&8)) {
    ///     println!("{}: {}", key, value);
    /// }
    /// assert_eq!(Some((&5, &"b")), map.range(Included(&4), Unbounded).next());
    /// ```
    #[unstable(feature = "btree_range",
               reason = "matches collection reform specification, waiting for dust to settle",
               issue = "27787")]
    pub fn range<Min: ?Sized + Ord = K, Max: ?Sized + Ord = K>(&self,
                                                               min: Bound<&Min>,
                                                               max: Bound<&Max>)
                                                               -> Range<K, V>
        where K: Borrow<Min> + Borrow<Max>
    {
        // Shared (immutable) instantiation of the range machinery.
        range_impl!(&self.root,
                    min,
                    max,
                    as_slices_internal,
                    iter,
                    Range,
                    edges,
                    [])
    }

    /// Constructs a mutable double-ended iterator over a sub-range of elements in the map, starting
    /// at min, and ending at max. If min is `Unbounded`, then it will be treated as "negative
    /// infinity", and if max is `Unbounded`, then it will be treated as "positive infinity".
    /// Thus range(Unbounded, Unbounded) will yield the whole collection.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(btree_range, collections_bound)]
    ///
    /// use std::collections::BTreeMap;
    /// use std::collections::Bound::{Included, Excluded};
    ///
    /// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"].iter()
    ///                                                                       .map(|&s| (s, 0))
    ///                                                                       .collect();
    /// for (_, balance) in map.range_mut(Included("B"), Excluded("Cheryl")) {
    ///     *balance += 100;
    /// }
    /// for (name, balance) in &map {
    ///     println!("{} => {}", name, balance);
    /// }
    /// ```
    #[unstable(feature = "btree_range",
               reason = "matches collection reform specification, waiting for dust to settle",
               issue = "27787")]
    pub fn range_mut<Min: ?Sized + Ord = K, Max: ?Sized + Ord = K>(&mut self,
                                                                   min: Bound<&Min>,
                                                                   max: Bound<&Max>)
                                                                   -> RangeMut<K, V>
        where K: Borrow<Min> + Borrow<Max>
    {
        // Mutable instantiation: same machinery, `[mut]` edges and iterators.
        range_impl!(&mut self.root,
                    min,
                    max,
                    as_slices_internal_mut,
                    iter_mut,
                    RangeMut,
                    edges_mut,
                    [mut])
    }

    /// Gets the given key's corresponding entry in the map for in-place manipulation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BTreeMap;
    ///
    /// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
    ///
    /// // count the number of occurrences of letters in the vec
    /// for x in vec!["a","b","a","c","a","b"] {
    ///     *count.entry(x).or_insert(0) += 1;
    /// }
    ///
    /// assert_eq!(count["a"], 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn entry(&mut self, mut key: K) -> Entry<K, V> {
        // same basic logic of `swap` and `pop`, blended together
        let mut stack = stack::PartialSearchStack::new(self);
        loop {
            // The closure takes `key` by move; when the search must descend
            // another level it hands the key back out through `Continue`.
            let result = stack.with(move |pusher, node| {
                match Node::search(node, &key) {
                    Found(handle) => {
                        // Perfect match
                        Finished(Occupied(OccupiedEntry { stack: pusher.seal(handle) }))
                    }
                    GoDown(handle) => {
                        match handle.force() {
                            Leaf(leaf_handle) => {
                                Finished(Vacant(VacantEntry {
                                    stack: pusher.seal(leaf_handle),
                                    key: key,
                                }))
                            }
                            Internal(internal_handle) => {
                                Continue((pusher.push(internal_handle), key))
                            }
                        }
                    }
                }
            });
            match result {
                Finished(finished) => return finished,
                Continue((new_stack, renewed_key)) => {
                    stack = new_stack;
                    key = renewed_key;
                }
            }
        }
    }
}
impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
    where K: Borrow<Q> + Ord,
          Q: Ord
{
    type Key = K;

    // Look up `key` and return a reference to the *stored* key (not the
    // borrowed query form) if present.
    fn get(&self, key: &Q) -> Option<&K> {
        // Iterative descent; no search stack is needed since nothing mutates.
        let mut cur_node = &self.root;
        loop {
            match Node::search(cur_node, key) {
                Found(handle) => return Some(handle.into_kv().0),
                GoDown(handle) => {
                    match handle.force() {
                        // Reached a leaf edge: the key is absent.
                        Leaf(_) => return None,
                        Internal(internal_handle) => {
                            cur_node = internal_handle.into_edge();
                            continue;
                        }
                    }
                }
            }
        }
    }

    fn take(&mut self, key: &Q) -> Option<K> {
        // See `remove` for an explanation of this.
        let mut stack = stack::PartialSearchStack::new(self);
        loop {
            let result = stack.with(move |pusher, node| {
                match Node::search(node, key) {
                    Found(handle) => {
                        // Perfect match. Terminate the stack here, and remove the entry
                        Finished(Some(pusher.seal(handle).remove()))
                    }
                    GoDown(handle) => {
                        // We need to keep searching, try to go down the next edge
                        match handle.force() {
                            // We're at a leaf; the key isn't in here
                            Leaf(_) => Finished(None),
                            Internal(internal_handle) => Continue(pusher.push(internal_handle)),
                        }
                    }
                }
            });
            match result {
                // Only the stored key survives; the `()` value is discarded.
                Finished(ret) => return ret.map(|(k, _)| k),
                Continue(new_stack) => stack = new_stack,
            }
        }
    }

    fn replace(&mut self, mut key: K) -> Option<K> {
        // See `insert` for an explanation of this.
        let mut stack = stack::PartialSearchStack::new(self);
        loop {
            let result = stack.with(move |pusher, node| {
                match Node::search::<K, _>(node, &key) {
                    Found(mut handle) => {
                        // Key already present: swap in the new key and hand
                        // the previously stored one back to the caller.
                        mem::swap(handle.key_mut(), &mut key);
                        Finished(Some(key))
                    }
                    GoDown(handle) => {
                        match handle.force() {
                            Leaf(leaf_handle) => {
                                pusher.seal(leaf_handle).insert(key, ());
                                Finished(None)
                            }
                            Internal(internal_handle) => {
                                Continue((pusher.push(internal_handle), key, ()))
                            }
                        }
                    }
                }
            });
            match result {
                Finished(ret) => return ret,
                Continue((new_stack, renewed_key, _)) => {
                    stack = new_stack;
                    key = renewed_key;
                }
            }
        }
    }
}
| 38.317522 | 100 | 0.501347 |
61a0eb417ac8aba58d82787785dd97f89564f379 | 584 | use std::io::{self, Write};
use extended_io::pipe::{PipeRead, PipeWrite};
use super::intcode_interpreter::IntcodeInterpreter;
/// Runs both parts of 2019 day 9 against the program in `2019_9.txt`,
/// prompting the user for a mode id before each run.
pub(super) fn run() -> io::Result<()> {
    // Print the part banner plus the mode prompt, flushing stdout so the
    // prompt is visible before the interpreter starts consuming input.
    fn announce(banner: &str) -> io::Result<()> {
        println!("{}", banner);
        print!("Enter mode id: ");
        io::stdout().flush()
    }

    let prog = IntcodeInterpreter::<PipeRead, PipeWrite>::read_from_file("2019_9.txt")?;

    // Part 1 runs on a duplicate so the original program is still available
    // for part 2 afterwards.
    announce("Year 2019 Day 9 Part 1")?;
    prog.dup::<PipeRead, PipeWrite>().run();

    announce("Year 2019 Day 9 Part 2")?;
    prog.run();

    Ok(())
}
| 25.391304 | 88 | 0.556507 |
4a5de3f62fc576db62a51a4b3233027a64b4ca9a | 2,212 | use self::super::util::uppercase_first;
use std::borrow::Cow;
use std::io::Write;
/// Enum representing all possible ways the application can fail.
#[derive(Debug, Clone, Hash, PartialEq, Eq)]
pub enum Error {
    /// An I/O error occurred.
    ///
    /// This includes higher-level I/O errors like FS ones.
    Io {
        /// The file the I/O operation regards.
        desc: &'static str,
        /// The failed operation.
        ///
        /// This should be lowercase and imperative ("create", "open").
        op: &'static str,
        /// Additional data; when present, `print_error` renders it after a
        /// colon at the end of the message.
        more: Option<Cow<'static, str>>,
    },
}
impl Error {
    /// Write the error message to the specified output stream.
    ///
    /// # Examples
    ///
    /// ```
    /// # use https::Error;
    /// # use std::iter::FromIterator;
    /// let mut out = Vec::new();
    /// Error::Io {
    ///     desc: "network",
    ///     op: "write",
    ///     more: Some("full buffer".into()),
    /// }.print_error(&mut out);
    /// assert_eq!(String::from_iter(out.iter().map(|&i| i as char)),
    ///            "Writing network failed: full buffer.\n".to_string());
    /// ```
    pub fn print_error<W: Write>(&self, err_out: &mut W) {
        match *self {
            Error::Io { desc, op, ref more } => {
                // Strip the last 'e', if any, so we get correct inflection for
                // continuous times ("write" -> "Writing").
                let op = uppercase_first(if op.ends_with('e') {
                    &op[..op.len() - 1]
                } else {
                    op
                });
                write!(err_out, "{}ing {} failed", op, desc).unwrap();
                // Append the optional detail after a colon.
                if let &Some(ref more) = more {
                    write!(err_out, ": {}", more).unwrap();
                }
                writeln!(err_out, ".").unwrap();
            }
        }
    }

    /// Get the executable exit value from an `Error` instance.
    ///
    /// # Examples
    ///
    /// ```
    /// # use https::Error;
    /// assert_eq!(Error::Io {
    ///     desc: "",
    ///     op: "",
    ///     more: None,
    /// }.exit_value(), 1);
    /// ```
    pub fn exit_value(&self) -> i32 {
        match *self {
            Error::Io { .. } => 1,
        }
    }
}
| 28.727273 | 96 | 0.466546 |
67f16bb6cf2fb351fc0352e2b65a42ac05a40a74 | 780 | use minime::{
editor::{keybindings::DebugKeybinding, Editor},
renderer::{
full::CrosstermRenderer,
styles::fancy::{FancyFooter, FancyGutter, FancyHeader},
},
Result,
};
fn main() -> Result<()> {
// Redirect our output to stdout (default).
let stdout = std::io::stdout();
let mut lock = stdout.lock();
let renderer = CrosstermRenderer::render_to(&mut lock)
.max_height(Some(10))
.header(FancyHeader {
message: "Debug keybindings for keybinding development",
})
.margin(FancyGutter)
.footer(FancyFooter);
// Print out some prompt using styling options.
let mut term = Editor::default();
term.read(DebugKeybinding, renderer)?;
dbg!(term.contents());
Ok(())
}
| 26 | 68 | 0.610256 |
e57536c9555fbf2ee97bc271a14d8ad73b725aba | 903 | // Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::values::{prop::layout_and_value_strategy, Value};
use move_core_types::value::MoveValue;
use proptest::prelude::*;
proptest! {
    #[test]
    fn serializer_round_trip((layout, value) in layout_and_value_strategy()) {
        // VM `Value` -> bytes -> VM `Value` must round-trip to an equal value.
        let blob = value.simple_serialize(&layout).expect("must serialize");
        let value_deserialized = Value::simple_deserialize(&blob, &layout).expect("must deserialize");
        assert!(value.equals(&value_deserialized).unwrap());

        // The `MoveValue` view of the same value must serialize to exactly
        // the same bytes...
        let move_value = value.as_move_value(&layout);
        let blob2 = move_value.simple_serialize().expect("must serialize");
        assert_eq!(blob, blob2);

        // ...and those bytes must deserialize back to an equal `MoveValue`.
        let move_value_deserialized = MoveValue::simple_deserialize(&blob2, &layout).expect("must deserialize.");
        assert_eq!(move_value, move_value_deserialized);
    }
}
3acd1c86c56aa7624eb42b28fc2810374f2be62d | 4,802 | use msgs::enums::SignatureScheme;
use sign;
use key;
use client;
use std::collections;
use std::sync::{Arc, Mutex};
/// An implementor of `StoresClientSessions` which does nothing.
pub struct NoClientSessionStorage {}

impl client::StoresClientSessions for NoClientSessionStorage {
    // Discard the session unconditionally; `false` reports nothing was cached.
    fn put(&self, _key: Vec<u8>, _value: Vec<u8>) -> bool {
        false
    }

    // Nothing is ever stored, so every lookup misses.
    fn get(&self, _key: &[u8]) -> Option<Vec<u8>> {
        None
    }
}
/// An implementor of `StoresClientSessions` that stores everything
/// in memory. It enforces a limit on the number of entries
/// to bound memory usage.
pub struct ClientSessionMemoryCache {
    // Session blobs keyed by opaque byte keys, behind a mutex for shared use.
    cache: Mutex<collections::HashMap<Vec<u8>, Vec<u8>>>,
    // Hard cap on `cache.len()`, enforced by `limit_size` after each `put`.
    max_entries: usize,
}
impl ClientSessionMemoryCache {
    /// Make a new ClientSessionMemoryCache. `size` is the
    /// maximum number of stored sessions.
    pub fn new(size: usize) -> Arc<ClientSessionMemoryCache> {
        debug_assert!(size > 0);
        let cache = Mutex::new(collections::HashMap::new());
        Arc::new(ClientSessionMemoryCache {
            cache: cache,
            max_entries: size,
        })
    }

    // Evict entries until the cache is back under `max_entries`.
    fn limit_size(&self) {
        let mut cache = self.cache.lock().unwrap();
        while cache.len() > self.max_entries {
            // HashMap iteration order is unspecified, so this evicts an
            // arbitrary entry (not necessarily the oldest).
            let victim = cache.keys().next().unwrap().clone();
            cache.remove(&victim);
        }
    }
}
impl client::StoresClientSessions for ClientSessionMemoryCache {
    // Insert (or overwrite) the session, then trim back to the size cap.
    fn put(&self, key: Vec<u8>, value: Vec<u8>) -> bool {
        {
            // Scope the guard so the lock is released before `limit_size`
            // re-acquires it.
            let mut cache = self.cache.lock().unwrap();
            cache.insert(key, value);
        }
        self.limit_size();
        true
    }

    // Return a copy of the stored session, if any.
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        let cache = self.cache.lock().unwrap();
        cache.get(key).cloned()
    }
}
pub struct FailResolveClientCert {}

impl client::ResolvesClientCert for FailResolveClientCert {
    // Never offer a certificate, whatever the server says it accepts.
    fn resolve(&self,
               _acceptable_issuers: &[&[u8]],
               _sigschemes: &[SignatureScheme])
               -> Option<sign::CertifiedKey> {
        None
    }

    // No certificates are configured at all.
    fn has_certs(&self) -> bool {
        false
    }
}
pub struct AlwaysResolvesClientCert(sign::CertifiedKey);

impl AlwaysResolvesClientCert {
    // Build a resolver that always offers `chain` signed with `priv_key`.
    //
    // Panics (via `expect`) if `priv_key` is not a supported key type.
    pub fn new(chain: Vec<key::Certificate>,
               priv_key: &key::PrivateKey) -> AlwaysResolvesClientCert {
        let key = sign::any_supported_type(priv_key)
            .expect("Invalid private key");
        AlwaysResolvesClientCert(sign::CertifiedKey::new(chain, Arc::new(key)))
    }
}

impl client::ResolvesClientCert for AlwaysResolvesClientCert {
    // Offer the configured certificate unconditionally; the issuer and
    // signature-scheme hints from the server are ignored.
    fn resolve(&self,
               _acceptable_issuers: &[&[u8]],
               _sigschemes: &[SignatureScheme])
               -> Option<sign::CertifiedKey> {
        Some(self.0.clone())
    }

    fn has_certs(&self) -> bool {
        true
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use StoresClientSessions;

    #[test]
    fn test_noclientsessionstorage_drops_put() {
        let c = NoClientSessionStorage {};
        assert_eq!(c.put(vec![0x01], vec![0x02]), false);
    }

    #[test]
    fn test_noclientsessionstorage_denies_gets() {
        let c = NoClientSessionStorage {};
        c.put(vec![0x01], vec![0x02]);
        assert_eq!(c.get(&[]), None);
        assert_eq!(c.get(&[0x01]), None);
        assert_eq!(c.get(&[0x02]), None);
    }

    #[test]
    fn test_clientsessionmemorycache_accepts_put() {
        let c = ClientSessionMemoryCache::new(4);
        assert_eq!(c.put(vec![0x01], vec![0x02]), true);
    }

    #[test]
    fn test_clientsessionmemorycache_persists_put() {
        let c = ClientSessionMemoryCache::new(4);
        assert_eq!(c.put(vec![0x01], vec![0x02]), true);
        // Two reads in a row: `get` must not evict or consume the entry.
        assert_eq!(c.get(&[0x01]), Some(vec![0x02]));
        assert_eq!(c.get(&[0x01]), Some(vec![0x02]));
    }

    #[test]
    fn test_clientsessionmemorycache_overwrites_put() {
        let c = ClientSessionMemoryCache::new(4);
        assert_eq!(c.put(vec![0x01], vec![0x02]), true);
        assert_eq!(c.put(vec![0x01], vec![0x04]), true);
        assert_eq!(c.get(&[0x01]), Some(vec![0x04]));
    }

    #[test]
    fn test_clientsessionmemorycache_drops_to_maintain_size_invariant() {
        let c = ClientSessionMemoryCache::new(4);
        assert_eq!(c.put(vec![0x01], vec![0x02]), true);
        assert_eq!(c.put(vec![0x03], vec![0x04]), true);
        assert_eq!(c.put(vec![0x05], vec![0x06]), true);
        assert_eq!(c.put(vec![0x07], vec![0x08]), true);
        assert_eq!(c.put(vec![0x09], vec![0x0a]), true);
        // Which entry gets evicted is unspecified (`limit_size` takes the
        // first HashMap key), so just count survivors: exactly 4 of 5 remain.
        let mut count = 0;
        if c.get(&[0x01]).is_some() { count += 1; }
        if c.get(&[0x03]).is_some() { count += 1; }
        if c.get(&[0x05]).is_some() { count += 1; }
        if c.get(&[0x07]).is_some() { count += 1; }
        if c.get(&[0x09]).is_some() { count += 1; }
        assert_eq!(count, 4);
    }
}
| 28.927711 | 79 | 0.586006 |
26f56ffacae7fd5c7a53968045d39991678488bc | 54,798 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
// Error messages for EXXXX errors.
// Each message should start and end with a new line, and be wrapped to 80 characters.
// In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable.
register_long_diagnostics! {
E0020: r##"
This error indicates that an attempt was made to divide by zero (or take the
remainder of a zero divisor) in a static or constant expression. Erroneous
code example:
```compile_fail
#[deny(const_err)]
const X: i32 = 42 / 0;
// error: attempt to divide by zero in a constant expression
```
"##,
E0038: r##"
Trait objects like `Box<Trait>` can only be constructed when certain
requirements are satisfied by the trait in question.
Trait objects are a form of dynamic dispatch and use a dynamically sized type
for the inner type. So, for a given trait `Trait`, when `Trait` is treated as a
type, as in `Box<Trait>`, the inner type is 'unsized'. In such cases the boxed
pointer is a 'fat pointer' that contains an extra pointer to a table of methods
(among other things) for dynamic dispatch. This design mandates some
restrictions on the types of traits that are allowed to be used in trait
objects, which are collectively termed as 'object safety' rules.
Attempting to create a trait object for a non object-safe trait will trigger
this error.
There are various rules:
### The trait cannot require `Self: Sized`
When `Trait` is treated as a type, the type does not implement the special
`Sized` trait, because the type does not have a known size at compile time and
can only be accessed behind a pointer. Thus, if we have a trait like the
following:
```
trait Foo where Self: Sized {
}
```
We cannot create an object of type `Box<Foo>` or `&Foo` since in this case
`Self` would not be `Sized`.
Generally, `Self : Sized` is used to indicate that the trait should not be used
as a trait object. If the trait comes from your own crate, consider removing
this restriction.
### Method references the `Self` type in its arguments or return type
This happens when a trait has a method like the following:
```
trait Trait {
fn foo(&self) -> Self;
}
impl Trait for String {
fn foo(&self) -> Self {
"hi".to_owned()
}
}
impl Trait for u8 {
fn foo(&self) -> Self {
1
}
}
```
(Note that `&self` and `&mut self` are okay, it's additional `Self` types which
cause this problem.)
In such a case, the compiler cannot predict the return type of `foo()` in a
situation like the following:
```compile_fail
trait Trait {
fn foo(&self) -> Self;
}
fn call_foo(x: Box<Trait>) {
let y = x.foo(); // What type is y?
// ...
}
```
If only some methods aren't object-safe, you can add a `where Self: Sized` bound
on them to mark them as explicitly unavailable to trait objects. The
functionality will still be available to all other implementers, including
`Box<Trait>` which is itself sized (assuming you `impl Trait for Box<Trait>`).
```
trait Trait {
fn foo(&self) -> Self where Self: Sized;
// more functions
}
```
Now, `foo()` can no longer be called on a trait object, but you will now be
allowed to make a trait object, and that will be able to call any object-safe
methods. With such a bound, one can still call `foo()` on types implementing
that trait that aren't behind trait objects.
### Method has generic type parameters
As mentioned before, trait objects contain pointers to method tables. So, if we
have:
```
trait Trait {
fn foo(&self);
}
impl Trait for String {
fn foo(&self) {
// implementation 1
}
}
impl Trait for u8 {
fn foo(&self) {
// implementation 2
}
}
// ...
```
At compile time each implementation of `Trait` will produce a table containing
the various methods (and other items) related to the implementation.
This works fine, but when the method gains generic parameters, we can have a
problem.
Usually, generic parameters get _monomorphized_. For example, if I have
```
fn foo<T>(x: T) {
// ...
}
```
The machine code for `foo::<u8>()`, `foo::<bool>()`, `foo::<String>()`, or any
other type substitution is different. Hence the compiler generates the
implementation on-demand. If you call `foo()` with a `bool` parameter, the
compiler will only generate code for `foo::<bool>()`. When we have additional
type parameters, the number of monomorphized implementations the compiler
generates does not grow drastically, since the compiler will only generate an
implementation if the function is called with unparametrized substitutions
(i.e., substitutions where none of the substituted types are themselves
parametrized).
However, with trait objects we have to make a table containing _every_ object
that implements the trait. Now, if it has type parameters, we need to add
implementations for every type that implements the trait, and there could
theoretically be an infinite number of types.
For example, with:
```
trait Trait {
fn foo<T>(&self, on: T);
// more methods
}
impl Trait for String {
fn foo<T>(&self, on: T) {
// implementation 1
}
}
impl Trait for u8 {
fn foo<T>(&self, on: T) {
// implementation 2
}
}
// 8 more implementations
```
Now, if we have the following code:
```compile_fail,E0038
# trait Trait { fn foo<T>(&self, on: T); }
# impl Trait for String { fn foo<T>(&self, on: T) {} }
# impl Trait for u8 { fn foo<T>(&self, on: T) {} }
# impl Trait for bool { fn foo<T>(&self, on: T) {} }
# // etc.
fn call_foo(thing: Box<Trait>) {
thing.foo(true); // this could be any one of the 8 types above
thing.foo(1);
thing.foo("hello");
}
```
We don't just need to create a table of all implementations of all methods of
`Trait`, we need to create such a table, for each different type fed to
`foo()`. In this case this turns out to be (10 types implementing `Trait`)*(3
types being fed to `foo()`) = 30 implementations!
With real world traits these numbers can grow drastically.
To fix this, it is suggested to use a `where Self: Sized` bound similar to the
fix for the sub-error above if you do not intend to call the method with type
parameters:
```
trait Trait {
fn foo<T>(&self, on: T) where Self: Sized;
// more methods
}
```
If this is not an option, consider replacing the type parameter with another
trait object (e.g. if `T: OtherTrait`, use `on: Box<OtherTrait>`). If the number
of types you intend to feed to this method is limited, consider manually listing
out the methods of different types.
### Method has no receiver
Methods that do not take a `self` parameter can't be called since there won't be
a way to get a pointer to the method table for them.
```
trait Foo {
fn foo() -> u8;
}
```
This could be called as `<Foo as Foo>::foo()`, which would not be able to pick
an implementation.
Adding a `Self: Sized` bound to these methods will generally make this compile.
```
trait Foo {
fn foo() -> u8 where Self: Sized;
}
```
### The trait cannot use `Self` as a type parameter in the supertrait listing
This is similar to the second sub-error, but subtler. It happens in situations
like the following:
```compile_fail
trait Super<A> {}
trait Trait: Super<Self> {
}
struct Foo;
impl Super<Foo> for Foo{}
impl Trait for Foo {}
```
Here, the supertrait might have methods as follows:
```
trait Super<A> {
fn get_a(&self) -> A; // note that this is object safe!
}
```
If the trait `Foo` was deriving from something like `Super<String>` or
`Super<T>` (where `Foo` itself is `Foo<T>`), this is okay, because given a type
`get_a()` will definitely return an object of that type.
However, if it derives from `Super<Self>`, even though `Super` is object safe,
the method `get_a()` would return an object of unknown type when called on the
function. `Self` type parameters let us make object safe traits no longer safe,
so they are forbidden when specifying supertraits.
There's no easy fix for this, generally code will need to be refactored so that
you no longer need to derive from `Super<Self>`.
"##,
E0072: r##"
When defining a recursive struct or enum, any use of the type being defined
from inside the definition must occur behind a pointer (like `Box` or `&`).
This is because structs and enums must have a well-defined size, and without
the pointer, the size of the type would need to be unbounded.
Consider the following erroneous definition of a type for a list of bytes:
```compile_fail,E0072
// error, invalid recursive struct type
struct ListNode {
head: u8,
tail: Option<ListNode>,
}
```
This type cannot have a well-defined size, because it needs to be arbitrarily
large (since we would be able to nest `ListNode`s to any depth). Specifically,
```plain
size of `ListNode` = 1 byte for `head`
+ 1 byte for the discriminant of the `Option`
+ size of `ListNode`
```
One way to fix this is by wrapping `ListNode` in a `Box`, like so:
```
struct ListNode {
head: u8,
tail: Option<Box<ListNode>>,
}
```
This works because `Box` is a pointer, so its size is well-known.
"##,
E0080: r##"
This error indicates that the compiler was unable to sensibly evaluate an
constant expression that had to be evaluated. Attempting to divide by 0
or causing integer overflow are two ways to induce this error. For example:
```compile_fail,E0080
enum Enum {
X = (1 << 500),
Y = (1 / 0)
}
```
Ensure that the expressions given can be evaluated as the desired integer type.
See the FFI section of the Reference for more information about using a custom
integer type:
https://doc.rust-lang.org/reference.html#ffi-attributes
"##,
E0106: r##"
This error indicates that a lifetime is missing from a type. If it is an error
inside a function signature, the problem may be with failing to adhere to the
lifetime elision rules (see below).
Here are some simple examples of where you'll run into this error:
```compile_fail,E0106
struct Foo { x: &bool } // error
struct Foo<'a> { x: &'a bool } // correct
struct Bar { x: Foo }
^^^ expected lifetime parameter
struct Bar<'a> { x: Foo<'a> } // correct
enum Bar { A(u8), B(&bool), } // error
enum Bar<'a> { A(u8), B(&'a bool), } // correct
type MyStr = &str; // error
type MyStr<'a> = &'a str; // correct
```
Lifetime elision is a special, limited kind of inference for lifetimes in
function signatures which allows you to leave out lifetimes in certain cases.
For more background on lifetime elision see [the book][book-le].
The lifetime elision rules require that any function signature with an elided
output lifetime must either have
- exactly one input lifetime
- or, multiple input lifetimes, but the function must also be a method with a
`&self` or `&mut self` receiver
In the first case, the output lifetime is inferred to be the same as the unique
input lifetime. In the second case, the lifetime is instead inferred to be the
same as the lifetime on `&self` or `&mut self`.
Here are some examples of elision errors:
```compile_fail,E0106
// error, no input lifetimes
fn foo() -> &str { }
// error, `x` and `y` have distinct lifetimes inferred
fn bar(x: &str, y: &str) -> &str { }
// error, `y`'s lifetime is inferred to be distinct from `x`'s
fn baz<'a>(x: &'a str, y: &str) -> &str { }
```
Here's an example that is currently an error, but may work in a future version
of Rust:
```compile_fail,E0106
struct Foo<'a>(&'a str);
trait Quux { }
impl Quux for Foo { }
```
Lifetime elision in implementation headers was part of the lifetime elision
RFC. It is, however, [currently unimplemented][iss15872].
[book-le]: https://doc.rust-lang.org/nightly/book/first-edition/lifetimes.html#lifetime-elision
[iss15872]: https://github.com/rust-lang/rust/issues/15872
"##,
E0119: r##"
There are conflicting trait implementations for the same type.
Example of erroneous code:
```compile_fail,E0119
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
struct Foo {
value: usize
}
impl MyTrait for Foo { // error: conflicting implementations of trait
// `MyTrait` for type `Foo`
fn get(&self) -> usize { self.value }
}
```
When looking for the implementation for the trait, the compiler finds
both the `impl<T> MyTrait for T` where T is all types and the `impl
MyTrait for Foo`. Since a trait cannot be implemented multiple times,
this is an error. So, when you write:
```
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
```
This makes the trait implemented on all types in the scope. So if you
try to implement it on another one after that, the implementations will
conflict. Example:
```
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for T {
fn get(&self) -> usize { 0 }
}
struct Foo;
fn main() {
let f = Foo;
f.get(); // the trait is implemented so we can use it
}
```
"##,
// This shouldn't really ever trigger since the repeated value error comes first
E0136: r##"
A binary can only have one entry point, and by default that entry point is the
function `main()`. If there are multiple such functions, please rename one.
"##,
E0137: r##"
More than one function was declared with the `#[main]` attribute.
Erroneous code example:
```compile_fail,E0137
#![feature(main)]
#[main]
fn foo() {}
#[main]
fn f() {} // error: multiple functions with a #[main] attribute
```
This error indicates that the compiler found multiple functions with the
`#[main]` attribute. This is an error because there must be a unique entry
point into a Rust program. Example:
```
#![feature(main)]
#[main]
fn f() {} // ok!
```
"##,
E0138: r##"
More than one function was declared with the `#[start]` attribute.
Erroneous code example:
```compile_fail,E0138
#![feature(start)]
#[start]
fn foo(argc: isize, argv: *const *const u8) -> isize {}
#[start]
fn f(argc: isize, argv: *const *const u8) -> isize {}
// error: multiple 'start' functions
```
This error indicates that the compiler found multiple functions with the
`#[start]` attribute. This is an error because there must be a unique entry
point into a Rust program. Example:
```
#![feature(start)]
#[start]
fn foo(argc: isize, argv: *const *const u8) -> isize { 0 } // ok!
```
"##,
E0139: r##"
#### Note: this error code is no longer emitted by the compiler.
There are various restrictions on transmuting between types in Rust; for example
types being transmuted must have the same size. To apply all these restrictions,
the compiler must know the exact types that may be transmuted. When type
parameters are involved, this cannot always be done.
So, for example, the following is not allowed:
```
use std::mem::transmute;
struct Foo<T>(Vec<T>);
fn foo<T>(x: Vec<T>) {
// we are transmuting between Vec<T> and Foo<T> here
let y: Foo<T> = unsafe { transmute(x) };
// do something with y
}
```
In this specific case there's a good chance that the transmute is harmless (but
this is not guaranteed by Rust). However, when alignment and enum optimizations
come into the picture, it's quite likely that the sizes may or may not match
with different type parameter substitutions. It's not possible to check this for
_all_ possible types, so `transmute()` simply only accepts types without any
unsubstituted type parameters.
If you need this, there's a good chance you're doing something wrong. Keep in
mind that Rust doesn't guarantee much about the layout of different structs
(even two structs with identical declarations may have different layouts). If
there is a solution that avoids the transmute entirely, try it instead.
If it's possible, hand-monomorphize the code by writing the function for each
possible type substitution. It's possible to use traits to do this cleanly,
for example:
```
use std::mem::transmute;
struct Foo<T>(Vec<T>);
trait MyTransmutableType: Sized {
fn transmute(_: Vec<Self>) -> Foo<Self>;
}
impl MyTransmutableType for u8 {
fn transmute(x: Vec<u8>) -> Foo<u8> {
unsafe { transmute(x) }
}
}
impl MyTransmutableType for String {
fn transmute(x: Vec<String>) -> Foo<String> {
unsafe { transmute(x) }
}
}
// ... more impls for the types you intend to transmute
fn foo<T: MyTransmutableType>(x: Vec<T>) {
let y: Foo<T> = <T as MyTransmutableType>::transmute(x);
// do something with y
}
```
Each impl will be checked for a size match in the transmute as usual, and since
there are no unbound type parameters involved, this should compile unless there
is a size mismatch in one of the impls.
It is also possible to manually transmute:
```
# use std::ptr;
# let v = Some("value");
# type SomeType = &'static [u8];
unsafe {
ptr::read(&v as *const _ as *const SomeType) // `v` transmuted to `SomeType`
}
# ;
```
Note that this does not move `v` (unlike `transmute`), and may need a
call to `mem::forget(v)` in case you want to avoid destructors being called.
"##,
E0152: r##"
A lang item was redefined.
Erroneous code example:
```compile_fail,E0152
#![feature(lang_items)]
#[lang = "panic_fmt"]
struct Foo; // error: duplicate lang item found: `panic_fmt`
```
Lang items are already implemented in the standard library. Unless you are
writing a free-standing application (e.g. a kernel), you do not need to provide
them yourself.
You can build a free-standing crate by adding `#![no_std]` to the crate
attributes:
```ignore (only-for-syntax-highlight)
#![no_std]
```
See also https://doc.rust-lang.org/book/first-edition/no-stdlib.html
"##,
E0214: r##"
A generic type was described using parentheses rather than angle brackets.
For example:
```compile_fail,E0214
fn main() {
let v: Vec(&str) = vec!["foo"];
}
```
This is not currently supported: `v` should be defined as `Vec<&str>`.
Parentheses are currently only used with generic types when defining parameters
for `Fn`-family traits.
"##,
E0230: r##"
The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
message for when a particular trait isn't implemented on a type placed in a
position that needs that trait. For example, when the following code is
compiled:
```compile_fail
#![feature(on_unimplemented)]
fn foo<T: Index<u8>>(x: T){}
#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
trait Index<Idx> { /* ... */ }
foo(true); // `bool` does not implement `Index<u8>`
```
There will be an error about `bool` not implementing `Index<u8>`, followed by a
note saying "the type `bool` cannot be indexed by `u8`".
As you can see, you can specify type parameters in curly braces for
substitution with the actual types (using the regular format string syntax) in
a given situation. Furthermore, `{Self}` will substitute to the type (in this
case, `bool`) that we tried to use.
This error appears when the curly braces contain an identifier which doesn't
match with any of the type parameters or the string `Self`. This might happen
if you misspelled a type parameter, or if you intended to use literal curly
braces. If it is the latter, escape the curly braces with a second curly brace
of the same type; e.g. a literal `{` is `{{`.
"##,
E0231: r##"
The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
message for when a particular trait isn't implemented on a type placed in a
position that needs that trait. For example, when the following code is
compiled:
```compile_fail
#![feature(on_unimplemented)]
fn foo<T: Index<u8>>(x: T){}
#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
trait Index<Idx> { /* ... */ }
foo(true); // `bool` does not implement `Index<u8>`
```
there will be an error about `bool` not implementing `Index<u8>`, followed by a
note saying "the type `bool` cannot be indexed by `u8`".
As you can see, you can specify type parameters in curly braces for
substitution with the actual types (using the regular format string syntax) in
a given situation. Furthermore, `{Self}` will substitute to the type (in this
case, `bool`) that we tried to use.
This error appears when the curly braces do not contain an identifier. Please
add one of the same name as a type parameter. If you intended to use literal
braces, use `{{` and `}}` to escape them.
"##,
E0232: r##"
The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
message for when a particular trait isn't implemented on a type placed in a
position that needs that trait. For example, when the following code is
compiled:
```compile_fail
#![feature(on_unimplemented)]
fn foo<T: Index<u8>>(x: T){}
#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"]
trait Index<Idx> { /* ... */ }
foo(true); // `bool` does not implement `Index<u8>`
```
there will be an error about `bool` not implementing `Index<u8>`, followed by a
note saying "the type `bool` cannot be indexed by `u8`".
For this to work, some note must be specified. An empty attribute will not do
anything, please remove the attribute or add some helpful note for users of the
trait.
"##,
E0261: r##"
When using a lifetime like `'a` in a type, it must be declared before being
used.
These two examples illustrate the problem:
```compile_fail,E0261
// error, use of undeclared lifetime name `'a`
fn foo(x: &'a str) { }
struct Foo {
// error, use of undeclared lifetime name `'a`
x: &'a str,
}
```
These can be fixed by declaring lifetime parameters:
```
fn foo<'a>(x: &'a str) {}
struct Foo<'a> {
x: &'a str,
}
```
"##,
E0262: r##"
Declaring certain lifetime names in parameters is disallowed. For example,
because the `'static` lifetime is a special built-in lifetime name denoting
the lifetime of the entire program, this is an error:
```compile_fail,E0262
// error, invalid lifetime parameter name `'static`
fn foo<'static>(x: &'static str) { }
```
"##,
E0263: r##"
A lifetime name cannot be declared more than once in the same scope. For
example:
```compile_fail,E0263
// error, lifetime name `'a` declared twice in the same scope
fn foo<'a, 'b, 'a>(x: &'a str, y: &'b str) { }
```
"##,
E0264: r##"
An unknown external lang item was used. Erroneous code example:
```compile_fail,E0264
#![feature(lang_items)]
extern "C" {
#[lang = "cake"] // error: unknown external lang item: `cake`
fn cake();
}
```
A list of available external lang items is available in
`src/librustc/middle/weak_lang_items.rs`. Example:
```
#![feature(lang_items)]
extern "C" {
#[lang = "panic_fmt"] // ok!
fn cake();
}
```
"##,
E0271: r##"
This is because of a type mismatch between the associated type of some
trait (e.g. `T::Bar`, where `T` implements `trait Quux { type Bar; }`)
and another type `U` that is required to be equal to `T::Bar`, but is not.
Examples follow.
Here is a basic example:
```compile_fail,E0271
trait Trait { type AssociatedType; }
fn foo<T>(t: T) where T: Trait<AssociatedType=u32> {
println!("in foo");
}
impl Trait for i8 { type AssociatedType = &'static str; }
foo(3_i8);
```
Here is that same example again, with some explanatory comments:
```compile_fail,E0271
trait Trait { type AssociatedType; }
fn foo<T>(t: T) where T: Trait<AssociatedType=u32> {
// ~~~~~~~~ ~~~~~~~~~~~~~~~~~~
// | |
// This says `foo` can |
// only be used with |
// some type that |
// implements `Trait`. |
// |
// This says not only must
// `T` be an impl of `Trait`
// but also that the impl
// must assign the type `u32`
// to the associated type.
println!("in foo");
}
impl Trait for i8 { type AssociatedType = &'static str; }
//~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// | |
// `i8` does have |
// implementation |
// of `Trait`... |
// ... but it is an implementation
// that assigns `&'static str` to
// the associated type.
foo(3_i8);
// Here, we invoke `foo` with an `i8`, which does not satisfy
// the constraint `<i8 as Trait>::AssociatedType=u32`, and
// therefore the type-checker complains with this error code.
```
Here is a more subtle instance of the same problem, that can
arise with for-loops in Rust:
```compile_fail
let vs: Vec<i32> = vec![1, 2, 3, 4];
for v in &vs {
match v {
1 => {},
_ => {},
}
}
```
The above fails because of an analogous type mismatch,
though may be harder to see. Again, here are some
explanatory comments for the same example:
```compile_fail
{
let vs = vec![1, 2, 3, 4];
// `for`-loops use a protocol based on the `Iterator`
// trait. Each item yielded in a `for` loop has the
// type `Iterator::Item` -- that is, `Item` is the
// associated type of the concrete iterator impl.
for v in &vs {
// ~ ~~~
// | |
// | We borrow `vs`, iterating over a sequence of
// | *references* of type `&Elem` (where `Elem` is
// | vector's element type). Thus, the associated
// | type `Item` must be a reference `&`-type ...
// |
// ... and `v` has the type `Iterator::Item`, as dictated by
// the `for`-loop protocol ...
match v {
1 => {}
// ~
// |
// ... but *here*, `v` is forced to have some integral type;
// only types like `u8`,`i8`,`u16`,`i16`, et cetera can
// match the pattern `1` ...
_ => {}
}
// ... therefore, the compiler complains, because it sees
// an attempt to solve the equations
// `some integral-type` = type-of-`v`
// = `Iterator::Item`
// = `&Elem` (i.e. `some reference type`)
//
// which cannot possibly all be true.
}
}
```
To avoid those issues, you have to make the types match correctly.
So we can fix the previous examples like this:
```
// Basic Example:
trait Trait { type AssociatedType; }
fn foo<T>(t: T) where T: Trait<AssociatedType = &'static str> {
println!("in foo");
}
impl Trait for i8 { type AssociatedType = &'static str; }
foo(3_i8);
// For-Loop Example:
let vs = vec![1, 2, 3, 4];
for v in &vs {
match v {
&1 => {}
_ => {}
}
}
```
"##,
E0275: r##"
This error occurs when there was a recursive trait requirement that overflowed
before it could be evaluated. Often this means that there is unbounded
recursion in resolving some type bounds.
For example, in the following code:
```compile_fail,E0275
trait Foo {}
struct Bar<T>(T);
impl<T> Foo for T where Bar<T>: Foo {}
```
To determine if a `T` is `Foo`, we need to check if `Bar<T>` is `Foo`. However,
to do this check, we need to determine that `Bar<Bar<T>>` is `Foo`. To
determine this, we check if `Bar<Bar<Bar<T>>>` is `Foo`, and so on. This is
clearly a recursive requirement that can't be resolved directly.
Consider changing your trait bounds so that they're less self-referential.
"##,
E0276: r##"
This error occurs when a bound in an implementation of a trait does not match
the bounds specified in the original trait. For example:
```compile_fail,E0276
trait Foo {
fn foo<T>(x: T);
}
impl Foo for bool {
fn foo<T>(x: T) where T: Copy {}
}
```
Here, all types implementing `Foo` must have a method `foo<T>(x: T)` which can
take any type `T`. However, in the `impl` for `bool`, we have added an extra
bound that `T` is `Copy`, which isn't compatible with the original trait.
Consider removing the bound from the method or adding the bound to the original
method definition in the trait.
"##,
E0277: r##"
You tried to use a type which doesn't implement some trait in a place which
expected that trait. Erroneous code example:
```compile_fail,E0277
// here we declare the Foo trait with a bar method
trait Foo {
fn bar(&self);
}
// we now declare a function which takes an object implementing the Foo trait
fn some_func<T: Foo>(foo: T) {
foo.bar();
}
fn main() {
// we now call the method with the i32 type, which doesn't implement
// the Foo trait
some_func(5i32); // error: the trait bound `i32 : Foo` is not satisfied
}
```
In order to fix this error, verify that the type you're using does implement
the trait. Example:
```
trait Foo {
fn bar(&self);
}
fn some_func<T: Foo>(foo: T) {
foo.bar(); // we can now use this method since i32 implements the
// Foo trait
}
// we implement the trait on the i32 type
impl Foo for i32 {
fn bar(&self) {}
}
fn main() {
some_func(5i32); // ok!
}
```
Or in a generic context, an erroneous code example would look like:
```compile_fail,E0277
fn some_func<T>(foo: T) {
println!("{:?}", foo); // error: the trait `core::fmt::Debug` is not
// implemented for the type `T`
}
fn main() {
// We now call the method with the i32 type,
// which *does* implement the Debug trait.
some_func(5i32);
}
```
Note that the error here is in the definition of the generic function: Although
we only call it with a parameter that does implement `Debug`, the compiler
still rejects the function: It must work with all possible input types. In
order to make this example compile, we need to restrict the generic type we're
accepting:
```
use std::fmt;
// Restrict the input type to types that implement Debug.
fn some_func<T: fmt::Debug>(foo: T) {
println!("{:?}", foo);
}
fn main() {
// Calling the method is still fine, as i32 implements Debug.
some_func(5i32);
// This would fail to compile now:
// struct WithoutDebug;
// some_func(WithoutDebug);
}
```
Rust only looks at the signature of the called function, as such it must
already specify all requirements that will be used for every type parameter.
"##,
E0281: r##"
#### Note: this error code is no longer emitted by the compiler.
You tried to supply a type which doesn't implement some trait in a location
which expected that trait. This error typically occurs when working with
`Fn`-based types. Erroneous code example:
```compile-fail
fn foo<F: Fn(usize)>(x: F) { }
fn main() {
// type mismatch: ... implements the trait `core::ops::Fn<(String,)>`,
// but the trait `core::ops::Fn<(usize,)>` is required
// [E0281]
foo(|y: String| { });
}
```
The issue in this case is that `foo` is defined as accepting a `Fn` with one
argument of type `usize`, but the closure we attempted to pass to it requires
one argument of type `String`.
"##,
E0282: r##"
This error indicates that type inference did not result in one unique possible
type, and extra information is required. In most cases this can be provided
by adding a type annotation. Sometimes you need to specify a generic type
parameter manually.
A common example is the `collect` method on `Iterator`. It has a generic type
parameter with a `FromIterator` bound, which for a `char` iterator is
implemented by `Vec` and `String` among others. Consider the following snippet
that reverses the characters of a string:
```compile_fail,E0282
let x = "hello".chars().rev().collect();
```
In this case, the compiler cannot infer what the type of `x` should be:
`Vec<char>` and `String` are both suitable candidates. To specify which type to
use, you can use a type annotation on `x`:
```
let x: Vec<char> = "hello".chars().rev().collect();
```
It is not necessary to annotate the full type. Once the ambiguity is resolved,
the compiler can infer the rest:
```
let x: Vec<_> = "hello".chars().rev().collect();
```
Another way to provide the compiler with enough information, is to specify the
generic type parameter:
```
let x = "hello".chars().rev().collect::<Vec<char>>();
```
Again, you need not specify the full type if the compiler can infer it:
```
let x = "hello".chars().rev().collect::<Vec<_>>();
```
Apart from a method or function with a generic type parameter, this error can
occur when a type parameter of a struct or trait cannot be inferred. In that
case it is not always possible to use a type annotation, because all candidates
have the same return type. For instance:
```compile_fail,E0282
struct Foo<T> {
num: T,
}
impl<T> Foo<T> {
fn bar() -> i32 {
0
}
fn baz() {
let number = Foo::bar();
}
}
```
This will fail because the compiler does not know which instance of `Foo` to
call `bar` on. Change `Foo::bar()` to `Foo::<T>::bar()` to resolve the error.
"##,
E0283: r##"
This error occurs when the compiler doesn't have enough information
to unambiguously choose an implementation.
For example:
```compile_fail,E0283
trait Generator {
fn create() -> u32;
}
struct Impl;
impl Generator for Impl {
fn create() -> u32 { 1 }
}
struct AnotherImpl;
impl Generator for AnotherImpl {
fn create() -> u32 { 2 }
}
fn main() {
let cont: u32 = Generator::create();
// error, impossible to choose one of Generator trait implementation
// Impl or AnotherImpl? Maybe anything else?
}
```
To resolve this error use the concrete type:
```
trait Generator {
fn create() -> u32;
}
struct AnotherImpl;
impl Generator for AnotherImpl {
fn create() -> u32 { 2 }
}
fn main() {
let gen1 = AnotherImpl::create();
// if there are multiple methods with same name (different traits)
let gen2 = <AnotherImpl as Generator>::create();
}
```
"##,
E0296: r##"
This error indicates that the given recursion limit could not be parsed. Ensure
that the value provided is a positive integer between quotes.
Erroneous code example:
```compile_fail,E0296
#![recursion_limit]
fn main() {}
```
And a working example:
```
#![recursion_limit="1000"]
fn main() {}
```
"##,
E0308: r##"
This error occurs when the compiler was unable to infer the concrete type of a
variable. It can occur for several cases, the most common of which is a
mismatch in the expected type that the compiler inferred for a variable's
initializing expression, and the actual type explicitly assigned to the
variable.
For example:
```compile_fail,E0308
let x: i32 = "I am not a number!";
// ~~~ ~~~~~~~~~~~~~~~~~~~~
// | |
// | initializing expression;
// | compiler infers type `&str`
// |
// type `i32` assigned to variable `x`
```
"##,
E0309: r##"
Types in type definitions have lifetimes associated with them that represent
how long the data stored within them is guaranteed to be live. This lifetime
must be as long as the data needs to be alive, and missing the constraint that
denotes this will cause this error.
```compile_fail,E0309
// This won't compile because T is not constrained, meaning the data
// stored in it is not guaranteed to last as long as the reference
struct Foo<'a, T> {
foo: &'a T
}
```
This will compile, because it has the constraint on the type parameter:
```
struct Foo<'a, T: 'a> {
foo: &'a T
}
```
To see why this is important, consider the case where `T` is itself a reference
(e.g., `T = &str`). If we don't include the restriction that `T: 'a`, the
following code would be perfectly legal:
```compile_fail,E0309
struct Foo<'a, T> {
foo: &'a T
}
fn main() {
let v = "42".to_string();
let f = Foo{foo: &v};
drop(v);
println!("{}", f.foo); // but we've already dropped v!
}
```
"##,
E0310: r##"
Types in type definitions have lifetimes associated with them that represent
how long the data stored within them is guaranteed to be live. This lifetime
must be as long as the data needs to be alive, and missing the constraint that
denotes this will cause this error.
```compile_fail,E0310
// This won't compile because T is not constrained to the static lifetime
// the reference needs
struct Foo<T> {
foo: &'static T
}
```
This will compile, because it has the constraint on the type parameter:
```
struct Foo<T: 'static> {
foo: &'static T
}
```
"##,
E0317: r##"
This error occurs when an `if` expression without an `else` block is used in a
context where a type other than `()` is expected, for example a `let`
expression:
```compile_fail,E0317
fn main() {
let x = 5;
let a = if x == 5 { 1 };
}
```
An `if` expression without an `else` block has the type `()`, so this is a type
error. To resolve it, add an `else` block having the same type as the `if`
block.
"##,
E0391: r##"
This error indicates that some types or traits depend on each other
and therefore cannot be constructed.
The following example contains a circular dependency between two traits:
```compile_fail,E0391
trait FirstTrait : SecondTrait {
}
trait SecondTrait : FirstTrait {
}
```
"##,
E0398: r##"
#### Note: this error code is no longer emitted by the compiler.
In Rust 1.3, the default object lifetime bounds are expected to change, as
described in [RFC 1156]. You are getting a warning because the compiler
thinks it is possible that this change will cause a compilation error in your
code. It is possible, though unlikely, that this is a false alarm.
The heart of the change is that where `&'a Box<SomeTrait>` used to default to
`&'a Box<SomeTrait+'a>`, it now defaults to `&'a Box<SomeTrait+'static>` (here,
`SomeTrait` is the name of some trait type). Note that the only types which are
affected are references to boxes, like `&Box<SomeTrait>` or
`&[Box<SomeTrait>]`. More common types like `&SomeTrait` or `Box<SomeTrait>`
are unaffected.
To silence this warning, edit your code to use an explicit bound. Most of the
time, this means that you will want to change the signature of a function that
you are calling. For example, if the error is reported on a call like `foo(x)`,
and `foo` is defined as follows:
```
# trait SomeTrait {}
fn foo(arg: &Box<SomeTrait>) { /* ... */ }
```
You might change it to:
```
# trait SomeTrait {}
fn foo<'a>(arg: &'a Box<SomeTrait+'a>) { /* ... */ }
```
This explicitly states that you expect the trait object `SomeTrait` to contain
references (with a maximum lifetime of `'a`).
[RFC 1156]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md
"##,
E0452: r##"
An invalid lint attribute has been given. Erroneous code example:
```compile_fail,E0452
#![allow(foo = "")] // error: malformed lint attribute
```
Lint attributes only accept a list of identifiers (where each identifier is a
lint name). Ensure the attribute is of this form:
```
#![allow(foo)] // ok!
// or:
#![allow(foo, foo2)] // ok!
```
"##,
E0453: r##"
A lint check attribute was overruled by a `forbid` directive set as an
attribute on an enclosing scope, or on the command line with the `-F` option.
Example of erroneous code:
```compile_fail,E0453
#![forbid(non_snake_case)]
#[allow(non_snake_case)]
fn main() {
let MyNumber = 2; // error: allow(non_snake_case) overruled by outer
// forbid(non_snake_case)
}
```
The `forbid` lint setting, like `deny`, turns the corresponding compiler
warning into a hard error. Unlike `deny`, `forbid` prevents itself from being
overridden by inner attributes.
If you're sure you want to override the lint check, you can change `forbid` to
`deny` (or use `-D` instead of `-F` if the `forbid` setting was given as a
command-line option) to allow the inner lint check attribute:
```
#![deny(non_snake_case)]
#[allow(non_snake_case)]
fn main() {
let MyNumber = 2; // ok!
}
```
Otherwise, edit the code to pass the lint check, and remove the overruled
attribute:
```
#![forbid(non_snake_case)]
fn main() {
let my_number = 2;
}
```
"##,
E0478: r##"
A lifetime bound was not satisfied.
Erroneous code example:
```compile_fail,E0478
// Check that the explicit lifetime bound (`'SnowWhite`, in this example) must
// outlive all the superbounds from the trait (`'kiss`, in this example).
trait Wedding<'t>: 't { }
struct Prince<'kiss, 'SnowWhite> {
child: Box<Wedding<'kiss> + 'SnowWhite>,
// error: lifetime bound not satisfied
}
```
In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss`
lifetime but the declaration of the `Prince` struct doesn't enforce it. To fix
this issue, you need to specify it:
```
trait Wedding<'t>: 't { }
struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'SnowWhite must
                                          // live at least as long as 'kiss.
child: Box<Wedding<'kiss> + 'SnowWhite>, // And now it's all good!
}
```
"##,
E0491: r##"
A reference has a longer lifetime than the data it references.
Erroneous code example:
```compile_fail,E0491
// struct containing a reference requires a lifetime parameter,
// because the data the reference points to must outlive the struct (see E0106)
struct Struct<'a> {
ref_i32: &'a i32,
}
// However, in a nested struct like this, the signature itself does not tell
// whether 'a outlives 'b or the other way around.
// So it could be possible that 'b of reference outlives 'a of the data.
struct Nested<'a, 'b> {
ref_struct: &'b Struct<'a>, // compile error E0491
}
```
To fix this issue, you can specify a bound to the lifetime like below:
```
struct Struct<'a> {
ref_i32: &'a i32,
}
// 'a: 'b means 'a outlives 'b
struct Nested<'a: 'b, 'b> {
ref_struct: &'b Struct<'a>,
}
```
"##,
E0496: r##"
A lifetime name is shadowing another lifetime name. Erroneous code example:
```compile_fail,E0496
struct Foo<'a> {
a: &'a i32,
}
impl<'a> Foo<'a> {
fn f<'a>(x: &'a i32) { // error: lifetime name `'a` shadows a lifetime
// name that is already in scope
}
}
```
Please change the name of one of the lifetimes to remove this error. Example:
```
struct Foo<'a> {
a: &'a i32,
}
impl<'a> Foo<'a> {
fn f<'b>(x: &'b i32) { // ok!
}
}
fn main() {
}
```
"##,
E0497: r##"
A stability attribute was used outside of the standard library. Erroneous code
example:
```compile_fail
#[stable] // error: stability attributes may not be used outside of the
// standard library
fn foo() {}
```
It is not possible to use stability attributes outside of the standard library.
Also, for now, it is not possible to write deprecation messages either.
"##,
E0512: r##"
Transmute with two differently sized types was attempted. Erroneous code
example:
```compile_fail,E0512
fn takes_u8(_: u8) {}
fn main() {
unsafe { takes_u8(::std::mem::transmute(0u16)); }
// error: transmute called with types of different sizes
}
```
Please use types with same size or use the expected type directly. Example:
```
fn takes_u8(_: u8) {}
fn main() {
unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok!
// or:
unsafe { takes_u8(0u8); } // ok!
}
```
"##,
E0517: r##"
This error indicates that a `#[repr(..)]` attribute was placed on an
unsupported item.
Examples of erroneous code:
```compile_fail,E0517
#[repr(C)]
type Foo = u8;
#[repr(packed)]
enum Foo {Bar, Baz}
#[repr(u8)]
struct Foo {bar: bool, baz: bool}
#[repr(C)]
impl Foo {
// ...
}
```
* The `#[repr(C)]` attribute can only be placed on structs and enums.
* The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs.
* The `#[repr(u8)]`, `#[repr(i16)]`, etc attributes only work on enums.
These attributes do not work on typedefs, since typedefs are just aliases.
Representations like `#[repr(u8)]`, `#[repr(i64)]` are for selecting the
discriminant size for C-like enums (when there is no associated data, e.g.
`enum Color {Red, Blue, Green}`), effectively setting the size of the enum to
the size of the provided type. Such an enum can be cast to a value of the same
type as well. In short, `#[repr(u8)]` makes the enum behave like an integer
with a constrained set of allowed values.
Only C-like enums can be cast to numerical primitives, so this attribute will
not apply to structs.
`#[repr(packed)]` reduces padding to make the struct size smaller. The
representation of enums isn't strictly defined in Rust, and this attribute
won't work on enums.
`#[repr(simd)]` will give a struct consisting of a homogeneous series of machine
types (i.e. `u8`, `i32`, etc) a representation that permits vectorization via
SIMD. This doesn't make much sense for enums since they don't consist of a
single list of data.
"##,
E0518: r##"
This error indicates that an `#[inline(..)]` attribute was incorrectly placed
on something other than a function or method.
Examples of erroneous code:
```compile_fail,E0518
#[inline(always)]
struct Foo;
#[inline(never)]
impl Foo {
// ...
}
```
`#[inline]` hints the compiler whether or not to attempt to inline a method or
function. By default, the compiler does a pretty good job of figuring this out
itself, but if you feel the need for annotations, `#[inline(always)]` and
`#[inline(never)]` can override or force the compiler's decision.
If you wish to apply this attribute to all methods in an impl, manually annotate
each method; it is not possible to annotate the entire impl with an `#[inline]`
attribute.
"##,
E0522: r##"
The lang attribute is intended for marking special items that are built-in to
Rust itself. This includes special traits (like `Copy` and `Sized`) that affect
how the compiler behaves, as well as special functions that may be automatically
invoked (such as the handler for out-of-bounds accesses when indexing a slice).
Erroneous code example:
```compile_fail,E0522
#![feature(lang_items)]
#[lang = "cookie"]
fn cookie() -> ! { // error: definition of an unknown language item: `cookie`
loop {}
}
```
"##,
E0525: r##"
A closure was used but didn't implement the expected trait.
Erroneous code example:
```compile_fail,E0525
struct X;
fn foo<T>(_: T) {}
fn bar<T: Fn(u32)>(_: T) {}
fn main() {
let x = X;
let closure = |_| foo(x); // error: expected a closure that implements
// the `Fn` trait, but this closure only
// implements `FnOnce`
bar(closure);
}
```
In the example above, `closure` is an `FnOnce` closure whereas the `bar`
function expected an `Fn` closure. In this case, it's simple to fix the issue,
you just have to implement `Copy` and `Clone` traits on `struct X` and it'll
be ok:
```
#[derive(Clone, Copy)] // We implement `Clone` and `Copy` traits.
struct X;
fn foo<T>(_: T) {}
fn bar<T: Fn(u32)>(_: T) {}
fn main() {
let x = X;
let closure = |_| foo(x);
bar(closure); // ok!
}
```
To understand better how closures work in Rust, read:
https://doc.rust-lang.org/book/first-edition/closures.html
"##,
E0580: r##"
The `main` function was incorrectly declared.
Erroneous code example:
```compile_fail,E0580
fn main() -> i32 { // error: main function has wrong type
0
}
```
The `main` function prototype should never take arguments or have a return type.
Example:
```
fn main() {
// your code
}
```
If you want to get command-line arguments, use `std::env::args`. To exit with a
specified exit code, use `std::process::exit`.
"##,
E0591: r##"
Per [RFC 401][rfc401], if you have a function declaration `foo`:
```
// For the purposes of this explanation, all of these
// different kinds of `fn` declarations are equivalent:
struct S;
fn foo(x: S) { /* ... */ }
# #[cfg(for_demonstration_only)]
extern "C" { fn foo(x: S); }
# #[cfg(for_demonstration_only)]
impl S { fn foo(self) { /* ... */ } }
```
the type of `foo` is **not** `fn(S)`, as one might expect.
Rather, it is a unique, zero-sized marker type written here as `typeof(foo)`.
However, `typeof(foo)` can be _coerced_ to a function pointer `fn(S)`,
so you rarely notice this:
```
# struct S;
# fn foo(_: S) {}
let x: fn(S) = foo; // OK, coerces
```
The reason that this matters is that the type `fn(S)` is not specific to
any particular function: it's a function _pointer_. So calling `x()` results
in a virtual call, whereas `foo()` is statically dispatched, because the type
of `foo` tells us precisely what function is being called.
As noted above, coercions mean that most code doesn't have to be
concerned with this distinction. However, you can tell the difference
when using **transmute** to convert a fn item into a fn pointer.
This is sometimes done as part of an FFI:
```compile_fail,E0591
extern "C" fn foo(userdata: Box<i32>) {
/* ... */
}
# fn callback(_: extern "C" fn(*mut i32)) {}
# use std::mem::transmute;
# unsafe {
let f: extern "C" fn(*mut i32) = transmute(foo);
callback(f);
# }
```
Here, transmute is being used to convert the types of the fn arguments.
This pattern is incorrect because the type of `foo` is a function
**item** (`typeof(foo)`), which is zero-sized, and the target type (`fn()`)
is a function pointer, which is not zero-sized.
This pattern should be rewritten. There are a few possible ways to do this:
- change the original fn declaration to match the expected signature,
  and do the cast in the fn body (the preferred option)
- cast the fn item to a fn pointer before calling transmute, as shown here:
```
# extern "C" fn foo(_: Box<i32>) {}
# use std::mem::transmute;
# unsafe {
let f: extern "C" fn(*mut i32) = transmute(foo as extern "C" fn(_));
let f: extern "C" fn(*mut i32) = transmute(foo as usize); // works too
# }
```
The same applies to transmutes to `*mut fn()`, which were observed in practice.
Note though that use of this type is generally incorrect.
The intention is typically to describe a function pointer, but just `fn()`
alone suffices for that. `*mut fn()` is a pointer to a fn pointer.
(Since these values are typically just passed to C code, however, this rarely
makes a difference in practice.)
[rfc401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md
"##,
E0593: r##"
You tried to supply an `Fn`-based type with a different number of arguments
than what was expected.
Erroneous code example:
```compile_fail,E0593
fn foo<F: Fn()>(x: F) { }
fn main() {
// [E0593] closure takes 1 argument but 0 arguments are required
foo(|y| { });
}
```
"##,
E0601: r##"
No `main` function was found in a binary crate. To fix this error, just add a
`main` function. For example:
```
fn main() {
// Your program will start here.
println!("Hello world!");
}
```
If you don't know the basics of Rust, you can go look to the Rust Book to get
started: https://doc.rust-lang.org/book/
"##,
E0602: r##"
An unknown lint was used on the command line.
Erroneous example:
```sh
rustc -D bogus omse_file.rs
```
Maybe you just misspelled the lint name or the lint doesn't exist anymore.
Either way, try to update/remove it in order to fix the error.
"##,
E0621: r##"
This error code indicates a mismatch between the lifetimes appearing in the
function signature (i.e., the parameter types and the return type) and the
data-flow found in the function body.
Erroneous code example:
```compile_fail,E0621
fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 { // error: explicit lifetime
// required in the type of
// `y`
if x > y { x } else { y }
}
```
In the code above, the function is returning data borrowed from either `x` or
`y`, but the `'a` annotation indicates that it is returning data only from `x`.
To fix the error, the signature and the body must be made to match. Typically,
this is done by updating the function signature. So, in this case, we change
the type of `y` to `&'a i32`, like so:
```
fn foo<'a>(x: &'a i32, y: &'a i32) -> &'a i32 {
if x > y { x } else { y }
}
```
Now the signature indicates that the function data borrowed from either `x` or
`y`. Alternatively, you could change the body to not return data from `y`:
```
fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
x
}
```
"##,
}
// Error codes registered here have no long-form `--explain` text (or were
// retired); the trailing comment on each line records what the code meant.
register_diagnostics! {
//  E0006 // merged with E0005
//  E0101, // replaced with E0282
//  E0102, // replaced with E0282
//  E0134,
//  E0135,
//  E0272, // on_unimplemented #0
//  E0273, // on_unimplemented #1
//  E0274, // on_unimplemented #2
    E0278, // requirement is not satisfied
    E0279, // requirement is not satisfied
    E0280, // requirement is not satisfied
    E0284, // cannot resolve type
//  E0285, // overflow evaluation builtin bounds
//  E0300, // unexpanded macro
//  E0304, // expected signed integer constant
//  E0305, // expected constant
    E0311, // thing may not live long enough
    E0312, // lifetime of reference outlives lifetime of borrowed content
    E0313, // lifetime of borrowed pointer outlives lifetime of captured variable
    E0314, // closure outlives stack frame
    E0315, // cannot invoke closure outside of its lifetime
    E0316, // nested quantification of lifetimes
    E0320, // recursive overflow during dropck
    E0473, // dereference of reference outside its lifetime
    E0474, // captured variable `..` does not outlive the enclosing closure
    E0475, // index of slice outside its lifetime
    E0476, // lifetime of the source pointer does not outlive lifetime bound...
    E0477, // the type `..` does not fulfill the required lifetime...
    E0479, // the type `..` (provided as the value of a type parameter) is...
    E0480, // lifetime of method receiver does not outlive the method call
    E0481, // lifetime of function argument does not outlive the function call
    E0482, // lifetime of return value does not outlive the function call
    E0483, // lifetime of operand does not outlive the operation
    E0484, // reference is not valid at the time of borrow
    E0485, // automatically reference is not valid at the time of borrow
    E0486, // type of expression contains references that are not valid during...
    E0487, // unsafe use of destructor: destructor might be called while...
    E0488, // lifetime of variable does not enclose its declaration
    E0489, // type/lifetime parameter not in scope here
    E0490, // a value of type `..` is borrowed for too long
    E0495, // cannot infer an appropriate lifetime due to conflicting requirements
    E0566, // conflicting representation hints
    E0623, // lifetime mismatch where both parameters are anonymous regions
    E0628, // generators cannot have explicit arguments
    E0631, // type mismatch in closure arguments
    E0637, // "'_" is not a valid lifetime bound
}
| 27.495233 | 99 | 0.678072 |
14c48fe697fa687ea827a3d12fbc3615899bf2bb | 1,529 | // enums3.rs
// Address all the TODOs to make the tests pass!
/// Messages the state machine can process.
enum Message {
    /// Set the current color to the given `(r, g, b)` tuple.
    ChangeColor((u8, u8, u8)),
    /// Print the contained text to stdout.
    Echo(String),
    /// Move to the given position.
    Move(Point),
    /// Stop processing (sets the `quit` flag).
    Quit,
}
/// A 2D position with 8-bit coordinates.
struct Point {
    x: u8,
    y: u8,
}
/// Aggregate state mutated by processing `Message`s.
struct State {
    // Current RGB color.
    color: (u8, u8, u8),
    // Current position.
    position: Point,
    // Set once a `Quit` message has been processed.
    quit: bool,
}
impl State {
    /// Replace the current RGB color.
    fn change_color(&mut self, color: (u8, u8, u8)) {
        self.color = color;
    }

    /// Mark the state machine as finished.
    fn quit(&mut self) {
        self.quit = true;
    }

    /// Print the supplied text to stdout.
    fn echo(&self, s: String) {
        println!("{}", s);
    }

    /// Update the current position.
    fn move_position(&mut self, p: Point) {
        self.position = p;
    }

    /// Dispatch a single `Message` to its matching handler.
    fn process(&mut self, message: Message) {
        match message {
            Message::Quit => self.quit(),
            Message::Echo(text) => self.echo(text),
            Message::Move(point) => self.move_position(point),
            Message::ChangeColor(rgb) => self.change_color(rgb),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Drives one message of each variant through `State::process` and
    /// verifies the resulting state.
    #[test]
    fn test_match_message_call() {
        let mut machine = State {
            color: (0, 0, 0),
            position: Point { x: 0, y: 0 },
            quit: false,
        };

        machine.process(Message::ChangeColor((255, 0, 255)));
        machine.process(Message::Echo(String::from("hello world")));
        machine.process(Message::Move(Point { x: 10, y: 15 }));
        machine.process(Message::Quit);

        assert_eq!(machine.color, (255, 0, 255));
        assert_eq!((machine.position.x, machine.position.y), (10, 15));
        assert!(machine.quit);
    }
}
| 21.535211 | 76 | 0.516678 |
b974afe0d352ea99a007333610db162f435af7f1 | 11,543 | // This file is part of Substrate.
// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Handling custom voting rules for GRANDPA.
//!
//! This exposes the `VotingRule` trait used to implement arbitrary voting
//! restrictions that are taken into account by the GRANDPA environment when
//! selecting a finality target to vote on.
use std::{future::Future, pin::Pin, sync::Arc};
use dyn_clone::DynClone;
use sc_client_api::blockchain::HeaderBackend;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header, NumberFor, One, Zero},
};
/// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary.
/// Resolves to `Some((hash, number))` of the restricted target, or `None` when
/// the vote needs no restriction.
pub type VotingRuleResult<Block> =
    Pin<Box<dyn Future<Output = Option<(<Block as BlockT>::Hash, NumberFor<Block>)>> + Send>>;
/// A trait for custom voting rules in GRANDPA.
pub trait VotingRule<Block, B>: DynClone + Send + Sync
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    /// Restrict the given `current_target` vote, returning the block hash and
    /// number of the block to vote on, and `None` in case the vote should not
    /// be restricted. `base` is the block that we're basing our votes on in
    /// order to pick our target (e.g. last round estimate), and `best_target`
    /// is the initial best vote target before any vote rules were applied. When
    /// applying multiple `VotingRule`s both `base` and `best_target` should
    /// remain unchanged.
    ///
    /// The contract of this interface requires that when restricting a vote, the
    /// returned value **must** be an ancestor of the given `current_target`,
    /// this also means that an invariant must be maintained throughout the
    /// execution of voting rules wherein `current_target <= best_target`.
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> VotingRuleResult<Block>;
}
/// The unit rule never restricts a vote: it always resolves to `None`.
impl<Block, B> VotingRule<Block, B> for ()
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    fn restrict_vote(
        &self,
        _backend: Arc<B>,
        _base: &Block::Header,
        _best_target: &Block::Header,
        _current_target: &Block::Header,
    ) -> VotingRuleResult<Block> {
        Box::pin(std::future::ready(None))
    }
}
/// A custom voting rule that guarantees that our vote is always behind the best
/// block by at least N blocks. In the best case our vote is exactly N blocks
/// behind the best block.
#[derive(Clone)]
pub struct BeforeBestBlockBy<N>(N);
impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        _base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> VotingRuleResult<Block> {
        use sp_arithmetic::traits::Saturating;

        // Never restrict voting on genesis.
        if current_target.number().is_zero() {
            return Box::pin(async { None })
        }

        // The highest block number this rule allows voting for.
        let target_number = best_target.number().saturating_sub(self.0);

        // Our current target is already at or below the restriction.
        if target_number >= *current_target.number() {
            return Box::pin(async { None })
        }

        // `find_target` runs eagerly (before the future is constructed), so
        // neither `target_number` nor `current_target` needs to be cloned into
        // the future — the already-computed result is wrapped in a ready
        // future, matching the sibling `ThreeQuartersOfTheUnfinalizedChain`
        // implementation.
        Box::pin(std::future::ready(find_target(&*backend, target_number, current_target)))
    }
}
/// A custom voting rule that limits votes towards 3/4 of the unfinalized chain,
/// using the given `base` and `best_target` to figure where the 3/4 target
/// should fall.
#[derive(Clone)]
pub struct ThreeQuartersOfTheUnfinalizedChain;
impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> VotingRuleResult<Block> {
        // target a vote towards 3/4 of the unfinalized chain (rounding up)
        // NOTE(review): `(diff * 3 + 2) / 4` is round-to-nearest rather than a
        // strict ceiling for some inputs (e.g. diff = 3 yields 2, not 3) —
        // confirm whether exact ceiling semantics are required here.
        let target_number = {
            let two = NumberFor::<Block>::one() + One::one();
            let three = two + One::one();
            let four = three + One::one();

            let diff = *best_target.number() - *base.number();
            let diff = ((diff * three) + two) / four;

            *base.number() + diff
        };

        // our current target is already lower than this rule would restrict
        if target_number >= *current_target.number() {
            return Box::pin(async { None })
        }

        // find the block at the given target height
        Box::pin(std::future::ready(find_target(&*backend, target_number, current_target)))
    }
}
/// Walk backwards from `current_header` until the ancestor at `target_number`
/// is reached, returning its hash and number. Returns `None` if the backend
/// errors while looking up a parent header.
fn find_target<Block, B>(
    backend: &B,
    target_number: NumberFor<Block>,
    current_header: &Block::Header,
) -> Option<(Block::Hash, NumberFor<Block>)>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
{
    let mut hash = current_header.hash();
    let mut header = current_header.clone();

    while *header.number() != target_number {
        // Walking parent links from a known block can only ever pass through
        // every intermediate height, so undershooting is a broken invariant.
        if *header.number() < target_number {
            unreachable!(
                "we are traversing backwards from a known block; \
                 blocks are stored contiguously; \
                 qed"
            );
        }

        // Step to the parent block.
        hash = *header.parent_hash();
        header = backend
            .header(BlockId::Hash(hash))
            .ok()?
            .expect("Header known to exist due to the existence of one of its descendents; qed");
    }

    Some((hash, target_number))
}
/// A composite voting rule that applies a list of rules in sequence.
///
/// The rules are shared behind an `Arc`, so cloning the composite is a cheap
/// reference-count bump.
struct VotingRules<Block, B> {
    rules: Arc<Vec<Box<dyn VotingRule<Block, B>>>>,
}

// A manual impl (rather than `#[derive(Clone)]`) avoids the spurious
// `Block: Clone` / `B: Clone` bounds the derive would add — only the `Arc` is
// cloned. The generic parameters are named consistently with the struct
// definition (`Block`, then `B`); the original impl swapped the names, which
// compiled but was misleading.
impl<Block, B> Clone for VotingRules<Block, B> {
    fn clone(&self) -> Self {
        VotingRules { rules: self.rules.clone() }
    }
}
impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B>
where
    Block: BlockT,
    B: HeaderBackend<Block> + 'static,
{
    /// Apply every inner rule in insertion order. Each rule sees the target
    /// produced by the previous one; a rule's output is only accepted when it
    /// stays within the `[base, current_target)` interval.
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> VotingRuleResult<Block> {
        let rules = self.rules.clone();
        // Clone the headers so the returned future is self-contained.
        let base = base.clone();
        let best_target = best_target.clone();
        let current_target = current_target.clone();

        Box::pin(async move {
            let mut restricted_target = current_target.clone();

            for rule in rules.iter() {
                if let Some(header) = rule
                    .restrict_vote(backend.clone(), &base, &best_target, &restricted_target)
                    .await
                    .filter(|(_, restricted_number)| {
                        // NOTE: we can only restrict votes within the interval [base, target)
                        restricted_number >= base.number() &&
                            restricted_number < restricted_target.number()
                    })
                    // Fetch the full header for the restricted hash; a rule
                    // whose header cannot be fetched is silently skipped.
                    .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok())
                    .and_then(std::convert::identity)
                {
                    restricted_target = header;
                }
            }

            let restricted_hash = restricted_target.hash();

            // Only report a restriction if some rule actually lowered the target.
            if restricted_hash != current_target.hash() {
                Some((restricted_hash, *restricted_target.number()))
            } else {
                None
            }
        })
    }
}
/// A builder of a composite voting rule that applies a set of rules to
/// progressively restrict the vote.
pub struct VotingRulesBuilder<Block, B> {
    // Rules are applied in the order in which they were added.
    rules: Vec<Box<dyn VotingRule<Block, B>>>,
}
impl<Block, B> Default for VotingRulesBuilder<Block, B>
where
    Block: BlockT,
    B: HeaderBackend<Block> + 'static,
{
    /// Default rule set: vote at least 2 blocks behind the best block, and no
    /// further than 3/4 of the way through the unfinalized chain.
    fn default() -> Self {
        VotingRulesBuilder::new()
            .add(BeforeBestBlockBy(2u32.into()))
            .add(ThreeQuartersOfTheUnfinalizedChain)
    }
}
impl<Block, B> VotingRulesBuilder<Block, B>
where
    Block: BlockT,
    B: HeaderBackend<Block> + 'static,
{
    /// Create an empty builder with no rules registered.
    pub fn new() -> Self {
        Self { rules: Vec::new() }
    }

    /// Append a single voting rule; rules are applied in insertion order.
    pub fn add<R>(mut self, rule: R) -> Self
    where
        R: VotingRule<Block, B> + 'static,
    {
        self.rules.push(Box::new(rule));
        self
    }

    /// Append every rule yielded by `rules`, preserving their order.
    pub fn add_all<I>(mut self, rules: I) -> Self
    where
        I: IntoIterator<Item = Box<dyn VotingRule<Block, B>>>,
    {
        for rule in rules {
            self.rules.push(rule);
        }
        self
    }

    /// Consume the builder, producing a composite rule that applies all of
    /// the previously added voting rules in-order.
    pub fn build(self) -> impl VotingRule<Block, B> + Clone {
        VotingRules { rules: Arc::new(self.rules) }
    }
}
// Forwarding impl: lets a boxed rule itself be used wherever a `VotingRule`
// is expected. It simply delegates to the inner (dereferenced) rule.
impl<Block, B> VotingRule<Block, B> for Box<dyn VotingRule<Block, B>>
where
    Block: BlockT,
    B: HeaderBackend<Block>,
    Self: Clone,
{
    fn restrict_vote(
        &self,
        backend: Arc<B>,
        base: &Block::Header,
        best_target: &Block::Header,
        current_target: &Block::Header,
    ) -> VotingRuleResult<Block> {
        (**self).restrict_vote(backend, base, best_target, current_target)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use sc_block_builder::BlockBuilderProvider;
    use sp_consensus::BlockOrigin;
    use sp_runtime::traits::Header as _;
    use substrate_test_runtime_client::{
        runtime::{Block, Header},
        Backend, Client, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder,
        TestClientBuilderExt,
    };

    /// A mock voting rule that subtracts a static number of blocks from the `current_target`.
    #[derive(Clone)]
    struct Subtract(u64);
    impl VotingRule<Block, Client<Backend>> for Subtract {
        fn restrict_vote(
            &self,
            backend: Arc<Client<Backend>>,
            _base: &Header,
            _best_target: &Header,
            current_target: &Header,
        ) -> VotingRuleResult<Block> {
            // Resolve the hash of the block `self.0` below the current target.
            let target_number = current_target.number() - self.0;
            let res = backend
                .hash(target_number)
                .unwrap()
                .map(|target_hash| (target_hash, target_number));

            Box::pin(std::future::ready(res))
        }
    }

    #[test]
    fn multiple_voting_rules_cannot_restrict_past_base() {
        // setup an aggregate voting rule composed of two voting rules
        // where each subtracts 50 blocks from the current target
        let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build();

        // Build a 200-block chain to vote on.
        let mut client = Arc::new(TestClientBuilder::new().build());
        for _ in 0..200 {
            let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
            futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
        }

        let genesis = client.header(&BlockId::Number(0u32.into())).unwrap().unwrap();
        let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap();

        let (_, number) =
            futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best))
                .unwrap();

        // we apply both rules which should subtract 100 blocks from best block (#200)
        // which means that we should be voting for block #100
        assert_eq!(number, 100);

        let block110 = client.header(&BlockId::Number(110u32.into())).unwrap().unwrap();

        let (_, number) = futures::executor::block_on(rule.restrict_vote(
            client.clone(),
            &block110,
            &best,
            &best,
        ))
        .unwrap();

        // base block is #110 while best block is #200; applying both rules
        // would make the target block (#100) lower than the base block,
        // therefore only one of the rules is applied.
        assert_eq!(number, 150);
    }
}
| 29.075567 | 98 | 0.697826 |
6a13eb73f08c8a51fcf7225d696238c8310651f9 | 2,238 | mod game_boy;
mod window;
use minifb::{Key, KeyRepeat};
use std::str::FromStr;
use crate::window::GbWindow;
use clap::{App, Arg};
use rand::Rng;
// Links:
// Endianness Guide:
// -> https://pastebin.com/5BEvWb2h
// -> GB classic is Little Endian
// https://gbdev.gg8.se/wiki/articles/Main_Page
// https://mgba-emu.github.io/gbdoc/
// https://rgbds.gbdev.io/docs/v0.5.0/gbz80.7
// https://www.pastraiser.com/cpu/gameboy/gameboy_opcodes.html
// http://gameboy.mongenel.com/dmg/asmmemmap.html
// http://bgb.bircd.org/pandocs.htm
// https://github.com/gbdev/awesome-gbdev
// https://ladecadence.net/trastero/listado%20juegos%20gameboy.html
// https://romhustler.org/roms/gbc/number
// https://github.com/aidan-clyens/GBExperience
/// Parsed command-line options for the emulator.
struct CliOpts {
    // Path to the ROM file to load (required positional argument).
    rom_path: String,
    // Window scale factor (defaults to 2 when not supplied).
    magnification: usize,
}
impl CliOpts {
    /// Parse command-line arguments into a `CliOpts`.
    ///
    /// The positional `rom-path` argument is required; `-m`/`--magnification VAL`
    /// optionally sets the window scale factor (default: 2).
    ///
    /// # Panics
    /// clap exits the process when `rom-path` is missing; this function panics
    /// with a message that includes the offending value when `magnification`
    /// is not a valid `usize`.
    fn load() -> CliOpts {
        let matches = App::new("GB-rs")
            .arg(Arg::with_name("rom-path").required(true).index(1))
            .arg(
                Arg::with_name("magnification")
                    .short("m")
                    .long("magnification")
                    .value_name("VAL"),
            )
            .get_matches();
        // `rom-path` is marked required, so clap guarantees it is present here.
        let rom_path = matches.value_of("rom-path").unwrap().to_owned();
        let magnification = matches
            .value_of("magnification")
            .map(|raw| {
                // Report the invalid input instead of a bare "could not parse".
                raw.parse::<usize>()
                    .unwrap_or_else(|e| panic!("invalid magnification {:?}: {}", raw, e))
            })
            .unwrap_or(2);
        CliOpts {
            rom_path,
            magnification,
        }
    }
}
/// Entry point: parse CLI options, load the ROM, and run the emulation loop.
fn main() {
    // Debug aid: show which memory region a sample address maps to.
    println!("Mem region: {:?}", game_boy::memory::MemRegion::get_region(0xC3C8));
    let opts = CliOpts::load();
    // NOTE(review): `unwrap` aborts with the raw error on a bad/missing ROM
    // path — consider a friendlier message.
    let mut gb = game_boy::GameBoy::load(&opts.rom_path.into()).unwrap();
    gb.memory().rom().print_meta();
    let mut window = GbWindow::new(opts.magnification);
    // for i in window.buffer_mut().iter_mut() {
    //     *i = rand::thread_rng().gen_range(0..=3);
    // }
    while window.is_open() {
        // if window.win().is_key_pressed(Key::Space, KeyRepeat::No) {
        //     for i in window.buffer_mut().iter_mut() {
        //         *i = rand::thread_rng().gen_range(0..=3);
        //     }
        // }
        // Emulate one frame into the window's pixel buffer, then present it.
        gb.frame(window.buffer_mut());
        window.display();
    }
}
| 28.329114 | 82 | 0.582216 |
6ad44b410eac317a577ee2e77533770d18cb48ea | 15,225 | use std::sync::Arc;
use test::Bencher;
use protocol::types::Hasher;
use super::*;
// Test helper: builds a mempool, inserts `$valid` valid and `$invalid` invalid
// mock txs (`$repeat` rounds), then asserts the tx cache holds `$output` entries.
macro_rules! insert {
    // Single round of valid-only insertions into a pool of `$pool_size`.
    (normal($pool_size: expr, $input: expr, $output: expr)) => {
        insert!(inner($pool_size, 1, $input, 0, $output));
    };
    // Re-insert the same batch `$repeat` times (exercises de-duplication).
    (repeat($repeat: expr, $input: expr, $output: expr)) => {
        insert!(inner($input * 10, $repeat, $input, 0, $output));
    };
    // Mix of `$valid` and `$invalid` txs; only valid ones should remain.
    (invalid($valid: expr, $invalid: expr, $output: expr)) => {
        insert!(inner($valid * 10, 1, $valid, $invalid, $output));
    };
    (inner($pool_size: expr, $repeat: expr, $valid: expr, $invalid: expr, $output: expr)) => {
        let mempool =
            Arc::new(new_mempool($pool_size, TIMEOUT_GAP, CYCLE_LIMIT, MAX_TX_SIZE).await);
        let txs = mock_txs($valid, $invalid, TIMEOUT);
        for _ in 0..$repeat {
            concurrent_insert(txs.clone(), Arc::clone(&mempool)).await;
        }
        assert_eq!(mempool.get_tx_cache().len(), $output);
    };
}
/// `check_dup_order_hashes` must reject a hash list containing a duplicate
/// and accept the same list with the duplicate removed.
#[test]
fn test_dup_order_hashes() {
    let digest = |s: &str| Hasher::digest(Bytes::from(s.to_owned()));

    // "test2" appears twice — the check must fail.
    let with_dup: Vec<_> = ["test1", "test2", "test3", "test4", "test2"]
        .iter()
        .map(|s| digest(s))
        .collect();
    assert!(check_dup_order_hashes(&with_dup).is_err());

    // All hashes unique — the check must pass.
    let unique: Vec<_> = ["test1", "test2", "test3", "test4"]
        .iter()
        .map(|s| digest(s))
        .collect();
    assert!(check_dup_order_hashes(&unique).is_ok());
}
#[tokio::test]
async fn test_insert() {
    // 1. insertion under pool size.
    insert!(normal(100, 100, 100));

    // 2. invalid insertion — only the 80 valid txs survive.
    insert!(invalid(80, 10, 80));
}

// Test helper: inserts `$insert` txs, waits briefly for background processing,
// packages with `$tx_num_limit`, and asserts `$expect_order` txs are packaged.
macro_rules! package {
    (normal($tx_num_limit: expr, $insert: expr, $expect_order: expr, $expect_propose: expr)) => {
        package!(inner(
            $tx_num_limit,
            TIMEOUT_GAP,
            TIMEOUT,
            $insert,
            $expect_order,
            $expect_propose
        ));
    };
    (timeout($timeout_gap: expr, $timeout: expr, $insert: expr, $expect: expr)) => {
        package!(inner($insert, $timeout_gap, $timeout, $insert, $expect, 0));
    };
    (inner($tx_num_limit: expr, $timeout_gap: expr, $timeout: expr, $insert: expr, $expect_order: expr, $expect_propose: expr)) => {
        let mempool =
            &Arc::new(new_mempool($insert * 10, $timeout_gap, CYCLE_LIMIT, MAX_TX_SIZE).await);
        let txs = mock_txs($insert, 0, $timeout);
        concurrent_insert(txs.clone(), Arc::clone(mempool)).await;
        // Give the pool a moment to absorb the concurrent insertions.
        protocol::tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        let tx_hashes = exec_package(Arc::clone(mempool), CYCLE_LIMIT.into(), $tx_num_limit).await;
        assert_eq!(tx_hashes.len(), $expect_order);
    };
}

#[tokio::test]
async fn test_package() {
    // 1. pool_size <= tx_num_limit
    package!(normal(100, 50, 50, 0));
    package!(normal(100, 100, 100, 0));

    // 2. tx_num_limit < pool_size <= 2 * tx_num_limit
    package!(normal(100, 101, 100, 0));
    package!(normal(100, 200, 100, 0));

    // 3. 2 * tx_num_limit < pool_size
    package!(normal(100, 201, 100, 0));
}
#[tokio::test]
async fn test_flush() {
    let mempool = Arc::new(default_mempool().await);

    // insert txs
    let txs = default_mock_txs(555);
    concurrent_insert(txs.clone(), Arc::clone(&mempool)).await;
    assert_eq!(mempool.get_tx_cache().len(), 555);

    // flush exist txs — removing 123 of the inserted txs leaves 432.
    let (remove_txs, _) = txs.split_at(123);
    let remove_hashes: Vec<Hash> = remove_txs.iter().map(|tx| tx.transaction.hash).collect();
    exec_flush(remove_hashes, Arc::clone(&mempool)).await;
    assert_eq!(mempool.len(), 432);
    // Packaging after a flush must not resurrect the removed txs.
    exec_package(Arc::clone(&mempool), CYCLE_LIMIT.into(), TX_NUM_LIMIT).await;
    assert_eq!(mempool.len(), 432);

    // flush absent txs — hashes never inserted must leave the pool untouched.
    let txs = default_mock_txs(222);
    let remove_hashes: Vec<Hash> = txs.iter().map(|tx| tx.transaction.hash).collect();
    exec_flush(remove_hashes, Arc::clone(&mempool)).await;
    assert_eq!(mempool.get_tx_cache().len(), 432);
}
// Test helper: puts `$in_pool` txs in the pool and broadcasts `$out_pool` txs
// kept outside it, then checks `ensure_order_txs` can fetch all of them.
macro_rules! ensure_order_txs {
    ($in_pool: expr, $out_pool: expr) => {
        let mempool = &Arc::new(default_mempool().await);

        let txs = &default_mock_txs($in_pool + $out_pool);
        let (in_pool_txs, out_pool_txs) = txs.split_at($in_pool);
        concurrent_insert(in_pool_txs.to_vec(), Arc::clone(mempool)).await;
        concurrent_broadcast(out_pool_txs.to_vec(), Arc::clone(mempool)).await;

        let tx_hashes: Vec<Hash> = txs.iter().map(|tx| tx.transaction.hash.clone()).collect();
        exec_ensure_order_txs(tx_hashes.clone(), Arc::clone(mempool)).await;

        // Every tx — whether it started in the pool or not — must be fetchable.
        let fetch_txs = exec_get_full_txs(tx_hashes, Arc::clone(mempool)).await;
        assert_eq!(fetch_txs.len(), txs.len());
    };
}
#[tokio::test]
async fn test_ensure_order_txs() {
    // all txs are in pool
    ensure_order_txs!(100, 0);
    // 50 txs are not in pool
    ensure_order_txs!(50, 50);
    // all txs are not in pool
    ensure_order_txs!(0, 100);
}
// #[tokio::test]
// async fn test_sync_propose_txs() {
// let mempool = &Arc::new(default_mempool().await);
// let txs = &default_mock_txs(50);
// let (exist_txs, need_sync_txs) = txs.split_at(20);
// concurrent_insert(exist_txs.to_vec(), Arc::clone(mempool)).await;
// concurrent_broadcast(need_sync_txs.to_vec(), Arc::clone(mempool)).await;
// let tx_hashes: Vec<Hash> = txs.iter().map(|tx|
// tx.transaction.hash).collect(); exec_sync_propose_txs(tx_hashes,
// Arc::clone(mempool)).await;
// assert_eq!(mempool.get_tx_cache().len(), 50);
// }
#[rustfmt::skip]
/// Bench in Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz (8 x 2200):
/// test tests::mempool::bench_check_sig ... bench: 2,881,140 ns/iter (+/- 907,215)
/// test tests::mempool::bench_check_sig_serial_1 ... bench: 94,666 ns/iter (+/- 11,070)
/// test tests::mempool::bench_check_sig_serial_10 ... bench: 966,800 ns/iter (+/- 97,227)
/// test tests::mempool::bench_check_sig_serial_100 ... bench: 10,098,216 ns/iter (+/- 1,289,584)
/// test tests::mempool::bench_check_sig_serial_1000 ... bench: 100,396,727 ns/iter (+/- 10,665,143)
/// test tests::mempool::bench_flush ... bench: 3,504,193 ns/iter (+/- 1,096,699)
/// test tests::mempool::bench_get_10000_full_txs ... bench: 14,997,762 ns/iter (+/- 2,697,725)
/// test tests::mempool::bench_get_20000_full_txs ... bench: 31,858,720 ns/iter (+/- 3,822,648)
/// test tests::mempool::bench_get_40000_full_txs ... bench: 65,027,639 ns/iter (+/- 3,926,768)
/// test tests::mempool::bench_get_80000_full_txs ... bench: 131,066,149 ns/iter (+/- 11,457,417)
/// test tests::mempool::bench_insert ... bench: 9,320,879 ns/iter (+/- 710,246)
/// test tests::mempool::bench_insert_serial_1 ... bench: 4,588 ns/iter (+/- 349)
/// test tests::mempool::bench_insert_serial_10 ... bench: 44,027 ns/iter (+/- 4,168)
/// test tests::mempool::bench_insert_serial_100 ... bench: 432,974 ns/iter (+/- 43,058)
/// test tests::mempool::bench_insert_serial_1000 ... bench: 4,449,648 ns/iter (+/- 560,818)
/// test tests::mempool::bench_mock_txs ... bench: 5,890,752 ns/iter (+/- 583,029)
/// test tests::mempool::bench_package ... bench: 3,684,431 ns/iter (+/- 278,575)
/// test tx_cache::tests::bench_flush ... bench: 3,034,868 ns/iter (+/- 371,514)
/// test tx_cache::tests::bench_flush_insert ... bench: 2,954,223 ns/iter (+/- 389,002)
/// test tx_cache::tests::bench_gen_txs ... bench: 2,479,226 ns/iter (+/- 399,728)
/// test tx_cache::tests::bench_insert ... bench: 2,742,422 ns/iter (+/- 641,587)
/// test tx_cache::tests::bench_package ... bench: 70,563 ns/iter (+/- 16,723)
/// test tx_cache::tests::bench_package_insert ... bench: 2,654,196 ns/iter (+/- 285,460)
#[bench]
fn bench_insert(b: &mut Bencher) {
    // Measures concurrent insertion of 100 fresh mock txs per iteration into a
    // single long-lived mempool (the pool keeps growing across iterations).
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mempool = &Arc::new(default_mempool_sync());

    b.iter(|| {
        let txs = default_mock_txs(100);
        runtime.block_on(concurrent_insert(txs, Arc::clone(mempool)));
    });
}
// Serial insertion benchmarks: each iteration re-inserts the same fixed batch
// one tx at a time (duplicate insert results are deliberately ignored).
// NOTE(review): the 1/10-tx variants drive the futures with
// `futures::executor::block_on` while the 100/1000-tx variants build a tokio
// runtime — confirm whether this difference is intentional, since it skews
// cross-variant comparisons.
#[bench]
fn bench_insert_serial_1(b: &mut Bencher) {
    let mempool = &Arc::new(default_mempool_sync());
    let txs = default_mock_txs(1);

    b.iter(move || {
        futures::executor::block_on(async {
            for tx in txs.clone().into_iter() {
                let _ = mempool.insert(Context::new(), tx).await;
            }
        });
    })
}

#[bench]
fn bench_insert_serial_10(b: &mut Bencher) {
    let mempool = &Arc::new(default_mempool_sync());
    let txs = default_mock_txs(10);

    b.iter(move || {
        futures::executor::block_on(async {
            for tx in txs.clone().into_iter() {
                let _ = mempool.insert(Context::new(), tx).await;
            }
        });
    })
}

#[bench]
fn bench_insert_serial_100(b: &mut Bencher) {
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mempool = &Arc::new(default_mempool_sync());
    let txs = default_mock_txs(100);

    b.iter(move || {
        runtime.block_on(async {
            for tx in txs.clone().into_iter() {
                let _ = mempool.insert(Context::new(), tx).await;
            }
        });
    })
}

#[bench]
fn bench_insert_serial_1000(b: &mut Bencher) {
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mempool = &Arc::new(default_mempool_sync());
    let txs = default_mock_txs(1000);

    b.iter(move || {
        runtime.block_on(async {
            for tx in txs.clone().into_iter() {
                let _ = mempool.insert(Context::new(), tx).await;
            }
        });
    })
}
#[bench]
fn bench_package(b: &mut Bencher) {
    // Pre-fills the pool with 20k txs once, then benchmarks packaging only.
    let runtime = tokio::runtime::Runtime::new().unwrap();
    let mempool = Arc::new(runtime.block_on(default_mempool()));

    let txs = default_mock_txs(20_000);
    runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool)));
    // Wait for background processing so the queue is fully populated before
    // measuring.
    std::thread::sleep(std::time::Duration::from_secs(1));
    assert_eq!(mempool.get_tx_cache().real_queue_len(), 20_000);

    b.iter(|| {
        runtime.block_on(exec_package(
            Arc::clone(&mempool),
            CYCLE_LIMIT.into(),
            TX_NUM_LIMIT,
        ));
    });
}
#[bench]
fn bench_get_10000_full_txs(b: &mut Bencher) {
let runtime = tokio::runtime::Runtime::new().unwrap();
let mempool = Arc::new(default_mempool_sync());
let txs = default_mock_txs(10_000);
let tx_hashes = txs.iter().map(|tx| tx.transaction.hash).collect::<Vec<_>>();
runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool)));
b.iter(|| {
runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool)));
});
}
#[bench]
fn bench_get_20000_full_txs(b: &mut Bencher) {
let runtime = tokio::runtime::Runtime::new().unwrap();
let mempool = Arc::new(default_mempool_sync());
let txs = default_mock_txs(20_000);
let tx_hashes = txs.iter().map(|tx| tx.transaction.hash).collect::<Vec<_>>();
runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool)));
b.iter(|| {
runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool)));
});
}
#[bench]
#[ignore]
fn bench_get_40000_full_txs(b: &mut Bencher) {
let runtime = tokio::runtime::Runtime::new().unwrap();
let mempool = Arc::new(default_mempool_sync());
let txs = default_mock_txs(40_000);
let tx_hashes = txs.iter().map(|tx| tx.transaction.hash).collect::<Vec<_>>();
runtime.block_on(concurrent_insert(txs, Arc::clone(&mempool)));
b.iter(|| {
runtime.block_on(exec_get_full_txs(tx_hashes.clone(), Arc::clone(&mempool)));
});
}
// Benchmarks fetching 80k full transactions; ignored by default (slow).
#[bench]
#[ignore]
fn bench_get_80000_full_txs(b: &mut Bencher) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let pool = Arc::new(default_mempool_sync());
    let txs = default_mock_txs(80_000);
    let hashes: Vec<_> = txs.iter().map(|tx| tx.transaction.hash).collect();
    rt.block_on(concurrent_insert(txs, Arc::clone(&pool)));
    b.iter(|| rt.block_on(exec_get_full_txs(hashes.clone(), Arc::clone(&pool))));
}
// Benchmarks a full insert → flush → package cycle over 100 mock txs.
#[bench]
fn bench_flush(b: &mut Bencher) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let pool = Arc::new(default_mempool_sync());
    let txs = default_mock_txs(100);
    let hashes: Vec<Hash> = txs.iter().map(|tx| tx.transaction.hash).collect();
    b.iter(|| {
        rt.block_on(concurrent_insert(txs.clone(), Arc::clone(&pool)));
        rt.block_on(exec_flush(hashes.clone(), Arc::clone(&pool)));
        rt.block_on(exec_package(
            Arc::clone(&pool),
            CYCLE_LIMIT.into(),
            TX_NUM_LIMIT,
        ));
    });
}
// Times authorization checks for 30k txs, each run on its own spawned task.
#[tokio::test]
async fn bench_sign_with_spawn_list() {
    let adapter = Arc::new(HashMemPoolAdapter::new());
    let txs = default_mock_txs(30000);
    let len = txs.len();
    let now = common_apm::Instant::now();
    let mut handles = Vec::with_capacity(len);
    for tx in txs {
        let adapter = Arc::clone(&adapter);
        handles.push(tokio::spawn(async move {
            adapter
                .check_authorization(Context::new(), &tx)
                .await
                .unwrap();
        }));
    }
    futures::future::try_join_all(handles).await.unwrap();
    println!(
        "bench_sign_with_spawn_list size {:?} cost {:?}",
        len,
        now.elapsed()
    );
}
// Times authorization checks for 30k txs run sequentially on one task.
#[tokio::test]
async fn bench_sign() {
    let adapter = HashMemPoolAdapter::new();
    let txs: Vec<_> = default_mock_txs(30000).into_iter().collect();
    let started = common_apm::Instant::now();
    for tx in &txs {
        adapter
            .check_authorization(Context::new(), tx)
            .await
            .unwrap();
    }
    println!("bench_sign size {:?} cost {:?}", txs.len(), started.elapsed());
}
// Benchmarks the cost of generating 100 mock transactions.
#[bench]
fn bench_mock_txs(b: &mut Bencher) {
    b.iter(|| {
        let _ = default_mock_txs(100);
    });
}
// Benchmarks concurrent signature checking of 100 mock transactions.
#[bench]
fn bench_check_sig(b: &mut Bencher) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let txs = default_mock_txs(100);
    b.iter(|| rt.block_on(concurrent_check_sig(txs.clone())));
}
// Serial signature check, 1 tx per iteration.
#[bench]
fn bench_check_sig_serial_1(b: &mut Bencher) {
    let txs = default_mock_txs(1);
    b.iter(|| {
        txs.iter().for_each(|tx| {
            let _ = check_sig(tx);
        });
    })
}
// Serial signature check, 10 txs per iteration.
#[bench]
fn bench_check_sig_serial_10(b: &mut Bencher) {
    let txs = default_mock_txs(10);
    b.iter(|| {
        txs.iter().for_each(|tx| {
            let _ = check_sig(tx);
        });
    })
}
// Serial signature check, 100 txs per iteration.
#[bench]
fn bench_check_sig_serial_100(b: &mut Bencher) {
    let txs = default_mock_txs(100);
    b.iter(|| {
        txs.iter().for_each(|tx| {
            let _ = check_sig(tx);
        });
    })
}
// Serial signature check, 1000 txs per iteration.
#[bench]
fn bench_check_sig_serial_1000(b: &mut Bencher) {
    let txs = default_mock_txs(1000);
    b.iter(|| {
        txs.iter().for_each(|tx| {
            let _ = check_sig(tx);
        });
    })
}
| 33.315098 | 132 | 0.601117 |
500c8c3cdd7cbaedfc73b7cfd34496c9b9da4f4d | 5,211 | //Copyright 2020 WHTCORPS
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//! This module defines core types that support the transaction processor.
use std::collections::BTreeMap;
use std::fmt;
use value_rc::{
ValueRc,
};
use symbols::{
Keyword,
PlainSymbol,
};
use types::{
ValueAndSpan,
};
/// A tempid encountered during transaction processing: either an external,
/// user-supplied string name or an internally generated numeric id.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum TempId {
    External(String),
    Internal(i64),
}
impl TempId {
    /// Returns the external name if this is an `External` tempid, else `None`.
    pub fn into_external(self) -> Option<String> {
        if let TempId::External(s) = self {
            Some(s)
        } else {
            None
        }
    }
}
impl fmt::Display for TempId {
    // External tempids print verbatim; internal ones as `<tempid N>`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self {
            TempId::External(s) => write!(f, "{}", s),
            TempId::Internal(x) => write!(f, "<tempid {}>", x),
        }
    }
}
/// Either a numeric causetid or a keyword solitonid naming one.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum CausetidOrSolitonid {
    Causetid(i64),
    Solitonid(Keyword),
}
impl From<i64> for CausetidOrSolitonid {
fn from(v: i64) -> Self {
CausetidOrSolitonid::Causetid(v)
}
}
impl From<Keyword> for CausetidOrSolitonid {
fn from(v: Keyword) -> Self {
CausetidOrSolitonid::Solitonid(v)
}
}
impl CausetidOrSolitonid {
    /// Returns the forward (unreversed) form of a reversed solitonid, or
    /// `None` for numeric causetids (which have no reversed form).
    pub fn unreversed(&self) -> Option<CausetidOrSolitonid> {
        match self {
            CausetidOrSolitonid::Causetid(_) => None,
            CausetidOrSolitonid::Solitonid(a) => {
                a.unreversed().map(CausetidOrSolitonid::Solitonid)
            }
        }
    }
}
/// A lookup-ref `(lookup-ref a v)`: identifies an entity by an attribute and
/// an atomic value.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct LookupRef<V> {
    pub a: AttributePlace,
    // In theory we could allow nested lookup-refs. In practice this would require us to process
    // lookup-refs in multiple phases, like how we resolve tempids, which isn't worth the effort.
    pub v: V, // An atom.
}
/// A "transaction function" that exposes some value determined by the current transaction. The
/// prototypical example is the current transaction ID, `(transaction-tx)`.
///
/// A natural next step might be to expose the current transaction instant `(transaction-instant)`,
/// but that's more difficult: the transaction itself can set the transaction instant (with some
/// restrictions), so the transaction function must be late-binding. Right now, that's difficult to
/// arrange in the transactor.
///
/// In the future, we might accept arguments; for example, perhaps we might expose `(ancestor
/// (transaction-tx) n)` to find the n-th ancestor of the current transaction. If we do accept
/// arguments, then the special case of `(lookup-ref a v)` should be handled as part of the
/// generalization.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub struct TxFunction {
    /// The function symbol, e.g. `transaction-tx`.
    pub op: PlainSymbol,
}
pub type MapNotation<V> = BTreeMap<EntidOrIdent, ValuePlace<V>>;
/// Where a value may appear in a transacted causet: a concrete causetid, a
/// tempid, a lookup-ref, a transaction function, a vector of places, an
/// atomic value, or nested map notation.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq)]
pub enum ValuePlace<V> {
    // We never know at parse-time whether an integer or solitonid is really a causetid, but we will often
    // know when building causets/causets programmatically.
    Causetid(CausetidOrSolitonid),
    // We never know at parse-time whether a string is really a tempid, but we will often know when
    // building causets programmatically.
    TempId(ValueRc<TempId>),
    LookupRef(LookupRef<V>),
    TxFunction(TxFunction),
    Vector(Vec<ValuePlace<V>>),
    Atom(V),
    MapNotation(MapNotation<V>),
}
impl<V: TransactableValueMarker> From<CausetidOrSolitonid> for ValuePlace<V> {
fn from(v: CausetidOrSolitonid) -> Self {
ValuePlace::Causetid(v)
}
}
impl<V: TransactableValueMarker> From<TempId> for ValuePlace<V> {
fn from(v: TempId) -> Self {
ValuePlace::TempId(v.into())
}
}
impl<V: TransactableValueMarker> From<ValueRc<TempId>> for ValuePlace<V> {
fn from(v: ValueRc<TempId>) -> Self {
ValuePlace::TempId(v)
}
}
impl<V: TransactableValueMarker> From<LookupRef<V>> for ValuePlace<V> {
fn from(v: LookupRef<V>) -> Self {
ValuePlace::LookupRef(v)
}
}
impl<V: TransactableValueMarker> From<TxFunction> for ValuePlace<V> {
fn from(v: TxFunction) -> Self {
ValuePlace::TxFunction(v)
}
}
impl<V: TransactableValueMarker> From<Vec<ValuePlace<V>>> for ValuePlace<V> {
fn from(v: Vec<ValuePlace<V>>) -> Self {
ValuePlace::Vector(v)
}
}
impl<V: TransactableValueMarker> From<V> for ValuePlace<V> {
fn from(v: V) -> Self {
ValuePlace::Atom(v)
}
}
impl<V: TransactableValueMarker> From<MapNotation<V>> for ValuePlace<V> {
fn from(v: MapNotation<V>) -> Self {
ValuePlace::MapNotation(v)
}
} | 31.203593 | 106 | 0.672999 |
183de7680a08143532ff735f329b79b9c7f6a96e | 19,125 | use crate::{
extract::RequestParts, handler::on, prelude::*, response::IntoResponse, routing::nest,
routing::MethodFilter, service,
};
use bytes::Bytes;
use futures_util::future::Ready;
use http::{header::AUTHORIZATION, Request, Response, StatusCode, Uri};
use hyper::{Body, Server};
use serde::Deserialize;
use serde_json::json;
use std::{
collections::HashMap,
convert::Infallible,
net::{SocketAddr, TcpListener},
task::{Context, Poll},
time::Duration,
};
use tower::{make::Shared, service_fn, BoxError, Service, ServiceBuilder};
use tower_http::{compression::CompressionLayer, trace::TraceLayer};
mod nest;
// End-to-end smoke test: GET and POST on "/" hit different handlers, and
// POST /users hits a third, all over a real TCP socket.
#[tokio::test]
async fn hello_world() {
    async fn root(_: Request<Body>) -> &'static str {
        "Hello, World!"
    }
    async fn foo(_: Request<Body>) -> &'static str {
        "foo"
    }
    async fn users_create(_: Request<Body>) -> &'static str {
        "users#create"
    }
    // `get(root).post(foo)` registers two methods on the same path.
    let app = route("/", get(root).post(foo)).route("/users", post(users_create));
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client.get(format!("http://{}", addr)).send().await.unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "Hello, World!");
    let res = client
        .post(format!("http://{}", addr))
        .send()
        .await
        .unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "foo");
    let res = client
        .post(format!("http://{}/users", addr))
        .send()
        .await
        .unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "users#create");
}
// A `String` extractor consumes the raw request body.
#[tokio::test]
async fn consume_body() {
    let app = route("/", get(|body: String| async { body }));
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let response = client
        .get(format!("http://{}", addr))
        .body("foo")
        .send()
        .await
        .unwrap();
    assert_eq!(response.text().await.unwrap(), "foo");
}
// JSON request bodies deserialize into `extract::Json<T>`.
#[tokio::test]
async fn deserialize_body() {
    #[derive(Debug, Deserialize)]
    struct Input {
        foo: String,
    }
    let app = route(
        "/",
        post(|input: extract::Json<Input>| async { input.0.foo }),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // `.json(...)` sets `content-type: application/json` for us.
    let res = client
        .post(format!("http://{}", addr))
        .json(&json!({ "foo": "bar" }))
        .send()
        .await
        .unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "bar");
}
// The `Json` extractor rejects requests without a JSON content-type, even if
// the body happens to be valid JSON.
#[tokio::test]
async fn consume_body_to_json_requires_json_content_type() {
    #[derive(Debug, Deserialize)]
    struct Input {
        foo: String,
    }
    let app = route(
        "/",
        post(|input: extract::Json<Input>| async { input.0.foo }),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // `.body(...)` (unlike `.json(...)`) sets no content-type header.
    let res = client
        .post(format!("http://{}", addr))
        .body(r#"{ "foo": "bar" }"#)
        .send()
        .await
        .unwrap();
    let status = res.status();
    dbg!(res.text().await.unwrap());
    assert_eq!(status, StatusCode::BAD_REQUEST);
}
// `ContentLengthLimit<Bytes, LIMIT>` accepts bodies up to LIMIT bytes
// (inclusive), rejects larger ones with 413, and rejects requests without a
// content-length header with 411.
#[tokio::test]
async fn body_with_length_limit() {
    use std::iter::repeat;
    // fixed: removed an unused `struct Input` (copied from the JSON tests
    // above but never referenced here; it only produced a dead-code warning).
    const LIMIT: u64 = 8;
    let app = route(
        "/",
        post(|_body: extract::ContentLengthLimit<Bytes, LIMIT>| async {}),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // One byte under the limit: accepted.
    let res = client
        .post(format!("http://{}", addr))
        .body(repeat(0_u8).take((LIMIT - 1) as usize).collect::<Vec<_>>())
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // Exactly at the limit: still accepted.
    let res = client
        .post(format!("http://{}", addr))
        .body(repeat(0_u8).take(LIMIT as usize).collect::<Vec<_>>())
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // One byte over the limit: 413 Payload Too Large.
    let res = client
        .post(format!("http://{}", addr))
        .body(repeat(0_u8).take((LIMIT + 1) as usize).collect::<Vec<_>>())
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::PAYLOAD_TOO_LARGE);
    // A streaming body has no content-length header: 411 Length Required.
    let res = client
        .post(format!("http://{}", addr))
        .body(reqwest::Body::wrap_stream(futures_util::stream::iter(
            vec![Ok::<_, std::io::Error>(bytes::Bytes::new())],
        )))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::LENGTH_REQUIRED);
}
// Routes dispatch by path and method; an unregistered path ("/") is 404.
#[tokio::test]
async fn routing() {
    let app = route(
        "/users",
        get(|_: Request<Body>| async { "users#index" })
            .post(|_: Request<Body>| async { "users#create" }),
    )
    .route("/users/:id", get(|_: Request<Body>| async { "users#show" }))
    .route(
        "/users/:id/action",
        get(|_: Request<Body>| async { "users#action" }),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // "/" itself was never registered.
    let res = client.get(format!("http://{}", addr)).send().await.unwrap();
    assert_eq!(res.status(), StatusCode::NOT_FOUND);
    let res = client
        .get(format!("http://{}/users", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "users#index");
    let res = client
        .post(format!("http://{}/users", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "users#create");
    let res = client
        .get(format!("http://{}/users/1", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "users#show");
    let res = client
        .get(format!("http://{}/users/1/action", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "users#action");
}
// Path params extract either as a typed scalar or as a name -> value map.
#[tokio::test]
async fn extracting_url_params() {
    let app = route(
        "/users/:id",
        get(|extract::Path(id): extract::Path<i32>| async move {
            assert_eq!(id, 42);
        })
        .post(
            |extract::Path(params_map): extract::Path<HashMap<String, i32>>| async move {
                assert_eq!(params_map.get("id").unwrap(), &1337);
            },
        ),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client
        .get(format!("http://{}/users/42", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let res = client
        .post(format!("http://{}/users/1337", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
}
// The same path param may be extracted more than once, as different types.
#[tokio::test]
async fn extracting_url_params_multiple_times() {
    let app = route(
        "/users/:id",
        get(|_: extract::Path<i32>, _: extract::Path<String>| async {}),
    );
    let addr = run_in_background(app).await;
    let response = reqwest::Client::new()
        .get(format!("http://{}/users/42", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(response.status(), StatusCode::OK);
}
// `.boxed()` erases the router's type after layering without changing routing.
#[tokio::test]
async fn boxing() {
    let app = route(
        "/",
        on(MethodFilter::Get, |_: Request<Body>| async {
            "hi from GET"
        })
        .on(MethodFilter::Post, |_: Request<Body>| async {
            "hi from POST"
        }),
    )
    .layer(tower_http::compression::CompressionLayer::new())
    .boxed();
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client.get(format!("http://{}", addr)).send().await.unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "hi from GET");
    let res = client
        .post(format!("http://{}", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "hi from POST");
}
// Plain tower services (with `handle_error` adapters) can serve routes:
// an echo service and a static-file service.
#[tokio::test]
async fn service_handlers() {
    use crate::service::ServiceExt as _;
    use tower_http::services::ServeFile;
    let app = route(
        "/echo",
        service::post(
            service_fn(|req: Request<Body>| async move {
                Ok::<_, BoxError>(Response::new(req.into_body()))
            })
            .handle_error(|_error: BoxError| Ok(StatusCode::INTERNAL_SERVER_ERROR)),
        ),
    )
    .route(
        "/static/Cargo.toml",
        service::on(
            MethodFilter::Get,
            ServeFile::new("Cargo.toml").handle_error(|error: std::io::Error| {
                Ok::<_, Infallible>((StatusCode::INTERNAL_SERVER_ERROR, error.to_string()))
            }),
        ),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client
        .post(format!("http://{}/echo", addr))
        .body("foobar")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "foobar");
    let res = client
        .get(format!("http://{}/static/Cargo.toml", addr))
        .body("foobar")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // Served from the crate's own manifest, so this line must exist.
    assert!(res.text().await.unwrap().contains("edition ="));
}
// Mixing raw services and converted handlers on different methods of the
// same route, plus `handle.into_service()` on a second route.
#[tokio::test]
async fn routing_between_services() {
    use std::convert::Infallible;
    use tower::service_fn;
    async fn handle(_: Request<Body>) -> &'static str {
        "handler"
    }
    let app = route(
        "/one",
        service::get(service_fn(|_: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::from("one get")))
        }))
        .post(service_fn(|_: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::from("one post")))
        }))
        .on(
            MethodFilter::Put,
            service_fn(|_: Request<Body>| async {
                Ok::<_, Infallible>(Response::new(Body::from("one put")))
            }),
        ),
    )
    .route(
        "/two",
        service::on(MethodFilter::Get, handle.into_service()),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client
        .get(format!("http://{}/one", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "one get");
    let res = client
        .post(format!("http://{}/one", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "one post");
    let res = client
        .put(format!("http://{}/one", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "one put");
    let res = client
        .get(format!("http://{}/two", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    assert_eq!(res.text().await.unwrap(), "handler");
}
// Middleware can be layered on a single handler (not the whole router).
#[tokio::test]
async fn middleware_on_single_route() {
    use tower::ServiceBuilder;
    use tower_http::{compression::CompressionLayer, trace::TraceLayer};
    async fn handle(_: Request<Body>) -> &'static str {
        "Hello, World!"
    }
    let app = route(
        "/",
        get(handle.layer(
            ServiceBuilder::new()
                .layer(TraceLayer::new_for_http())
                .layer(CompressionLayer::new())
                .into_inner(),
        )),
    );
    let addr = run_in_background(app).await;
    let res = reqwest::get(format!("http://{}", addr)).await.unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "Hello, World!");
}
// A 100ms timeout layer around a handler that sleeps 10s produces an error,
// which `handle_error` maps to a 500 response.
#[tokio::test]
async fn handling_errors_from_layered_single_routes() {
    async fn handle(_req: Request<Body>) -> &'static str {
        tokio::time::sleep(Duration::from_secs(10)).await;
        ""
    }
    let app = route(
        "/",
        get(handle
            .layer(
                ServiceBuilder::new()
                    .timeout(Duration::from_millis(100))
                    .layer(TraceLayer::new_for_http())
                    .into_inner(),
            )
            .handle_error(|_error: BoxError| {
                Ok::<_, Infallible>(StatusCode::INTERNAL_SERVER_ERROR)
            })),
    );
    let addr = run_in_background(app).await;
    let res = reqwest::get(format!("http://{}", addr)).await.unwrap();
    assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
// Same as above, but the timeout layer and error handler wrap the whole
// router instead of a single route.
#[tokio::test]
async fn layer_on_whole_router() {
    async fn handle(_req: Request<Body>) -> &'static str {
        tokio::time::sleep(Duration::from_secs(10)).await;
        ""
    }
    let app = route("/", get(handle))
        .layer(
            ServiceBuilder::new()
                .layer(CompressionLayer::new())
                .timeout(Duration::from_millis(100))
                .into_inner(),
        )
        .handle_error(|_err: BoxError| Ok::<_, Infallible>(StatusCode::INTERNAL_SERVER_ERROR));
    let addr = run_in_background(app).await;
    let res = reqwest::get(format!("http://{}", addr)).await.unwrap();
    assert_eq!(res.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
// `TypedHeader` extracts a parsed header; a missing header yields the
// extractor's rejection message in the response body.
#[tokio::test]
async fn typed_header() {
    use extract::TypedHeader;
    async fn handle(TypedHeader(user_agent): TypedHeader<headers::UserAgent>) -> impl IntoResponse {
        user_agent.to_string()
    }
    let app = route("/", get(handle));
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client
        .get(format!("http://{}", addr))
        .header("user-agent", "foobar")
        .send()
        .await
        .unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "foobar");
    // No user-agent header: the rejection text is returned instead.
    let res = client.get(format!("http://{}", addr)).send().await.unwrap();
    let body = res.text().await.unwrap();
    assert_eq!(body, "invalid HTTP header (user-agent)");
}
// A plain `tower::service_fn` can sit at the leaf of the router; this only
// checks that the app type-checks and the server starts.
#[tokio::test]
async fn service_in_bottom() {
    async fn handler(_req: Request<hyper::Body>) -> Result<Response<hyper::Body>, hyper::Error> {
        Ok(Response::new(hyper::Body::empty()))
    }
    run_in_background(route("/", service::get(service_fn(handler)))).await;
}
// An extractor can act as middleware: `RequireAuth` rejects requests whose
// authorization header is missing or wrong with 401.
#[tokio::test]
async fn test_extractor_middleware() {
    struct RequireAuth;
    #[async_trait::async_trait]
    impl<B> extract::FromRequest<B> for RequireAuth
    where
        B: Send,
    {
        type Rejection = StatusCode;
        async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
            if let Some(auth) = req
                .headers()
                .expect("headers already extracted")
                .get("authorization")
                .and_then(|v| v.to_str().ok())
            {
                if auth == "secret" {
                    return Ok(Self);
                }
            }
            Err(StatusCode::UNAUTHORIZED)
        }
    }
    async fn handler() {}
    let app = route(
        "/",
        get(handler.layer(extract::extractor_middleware::<RequireAuth>())),
    );
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // No authorization header: rejected.
    let res = client
        .get(format!("http://{}/", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
    // Correct token: the wrapped handler runs.
    let res = client
        .get(format!("http://{}/", addr))
        .header(AUTHORIZATION, "secret")
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
}
// Known path with an unregistered method -> 405; unknown path -> 404.
#[tokio::test]
async fn wrong_method_handler() {
    let app = route("/", get(|| async {}).post(|| async {})).route("/foo", patch(|| async {}));
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    // "/" only has GET and POST.
    let res = client
        .patch(format!("http://{}", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED);
    let res = client
        .patch(format!("http://{}/foo", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // "/foo" only has PATCH.
    let res = client
        .post(format!("http://{}/foo", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED);
    // "/bar" was never registered.
    let res = client
        .get(format!("http://{}/bar", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
// Same 405/404 semantics as `wrong_method_handler`, but with a hand-written
// tower `Service` instead of handler functions.
#[tokio::test]
async fn wrong_method_service() {
    #[derive(Clone)]
    struct Svc;
    impl<R> Service<R> for Svc {
        type Response = Response<http_body::Empty<Bytes>>;
        type Error = Infallible;
        type Future = Ready<Result<Self::Response, Self::Error>>;
        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, _req: R) -> Self::Future {
            futures_util::future::ok(Response::new(http_body::Empty::new()))
        }
    }
    let app = route("/", service::get(Svc).post(Svc)).route("/foo", service::patch(Svc));
    let addr = run_in_background(app).await;
    let client = reqwest::Client::new();
    let res = client
        .patch(format!("http://{}", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED);
    let res = client
        .patch(format!("http://{}/foo", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let res = client
        .post(format!("http://{}/foo", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::METHOD_NOT_ALLOWED);
    let res = client
        .get(format!("http://{}/bar", addr))
        .send()
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
/// Run a `tower::Service` in the background and get a URI for it.
async fn run_in_background<S, ResBody>(svc: S) -> SocketAddr
where
S: Service<Request<Body>, Response = Response<ResBody>> + Clone + Send + 'static,
ResBody: http_body::Body + Send + 'static,
ResBody::Data: Send,
ResBody::Error: Into<BoxError>,
S::Future: Send,
S::Error: Into<BoxError>,
{
let listener = TcpListener::bind("127.0.0.1:0").expect("Could not bind ephemeral socket");
let addr = listener.local_addr().unwrap();
println!("Listening on {}", addr);
let (tx, rx) = tokio::sync::oneshot::channel();
tokio::spawn(async move {
let server = Server::from_tcp(listener).unwrap().serve(Shared::new(svc));
tx.send(()).unwrap();
server.await.expect("server error");
});
rx.await.unwrap();
addr
}
| 26.37931 | 100 | 0.546405 |
2f000cb95f679f46e58f2bedfa56fcbec043bd07 | 97 | use std::fs::File;
fn main() {
    // Open hello.txt, panicking with the given message on failure.
    // fixed: bound as `_f` because the handle is otherwise unused (the file
    // closes on drop); the underscore silences the unused-variable warning.
    let _f = File::open("hello.txt").expect("開啟 hello.txt 失敗");
}
| 16.166667 | 62 | 0.57732 |
1a071e261c463db336e3c85f8c7fee33784b5d60 | 2,140 | #![no_std]
extern crate embedded_hal as hal;
extern crate nb;
// Re-export the peripheral access crate for the selected chip as
// `target_device`; exactly one chip feature is expected to be enabled.
#[cfg(any(feature = "sams70j19"))] pub use atsams70j19 as target_device;
#[cfg(any(feature = "sams70j19b"))] pub use atsams70j19b as target_device;
#[cfg(any(feature = "sams70j20"))] pub use atsams70j20 as target_device;
#[cfg(any(feature = "sams70j20b"))] pub use atsams70j20b as target_device;
#[cfg(any(feature = "sams70j21"))] pub use atsams70j21 as target_device;
#[cfg(any(feature = "sams70j21b"))] pub use atsams70j21b as target_device;
#[cfg(any(feature = "sams70n19"))] pub use atsams70n19 as target_device;
#[cfg(any(feature = "sams70n19b"))] pub use atsams70n19b as target_device;
#[cfg(any(feature = "sams70n20"))] pub use atsams70n20 as target_device;
#[cfg(any(feature = "sams70n20b"))] pub use atsams70n20b as target_device;
// fixed: the feature name was written "sams70n21 " (trailing space), which can
// never match a Cargo feature, so this re-export was unreachable.
#[cfg(any(feature = "sams70n21"))] pub use atsams70n21 as target_device;
#[cfg(any(feature = "sams70n21b"))] pub use atsams70n21b as target_device;
#[cfg(any(feature = "sams70q19"))] pub use atsams70q19 as target_device;
#[cfg(any(feature = "sams70q19b"))] pub use atsams70q19b as target_device;
#[cfg(any(feature = "sams70q20"))] pub use atsams70q20 as target_device;
#[cfg(any(feature = "sams70q20b"))] pub use atsams70q20b as target_device;
#[cfg(any(feature = "sams70q21"))] pub use atsams70q21 as target_device;
#[cfg(any(feature = "sams70q21b"))] pub use atsams70q21b as target_device;
#[cfg(any(feature = "same70j19b"))] pub use atsame70j19b as target_device;
#[cfg(any(feature = "same70j20b"))] pub use atsame70j20b as target_device;
#[cfg(any(feature = "same70j21b"))] pub use atsame70j21b as target_device;
#[cfg(any(feature = "same70n19b"))] pub use atsame70n19b as target_device;
#[cfg(any(feature = "same70n20b"))] pub use atsame70n20b as target_device;
#[cfg(any(feature = "same70n21b"))] pub use atsame70n21b as target_device;
#[cfg(any(feature = "same70q19b"))] pub use atsame70q19b as target_device;
#[cfg(any(feature = "same70q20b"))] pub use atsame70q20b as target_device;
#[cfg(any(feature = "same70q21b"))] pub use atsame70q21b as target_device;
pub mod serial;
| 61.142857 | 75 | 0.725701 |
d61009a0c275694cae29e5744f822b5880ed589f | 2,596 | use day_4::lib::passport::*;
use day_4::lib::passport_validator::*;
use std::io::Error as ioError;
fn main() {
    // Parse the puzzle input once per part (each solver consumes its copy).
    let part_one = part_one_solution(read_input().unwrap());
    let part_two = part_two_solution(read_input().unwrap());
    println!("The Part One Solution is: {}", part_one);
    println!("The Part Two Solution is: {}", part_two);
}
// Counts passports whose required fields are all present (part one rules).
fn part_one_solution(passports: Vec<Vec<String>>) -> i32 {
    let parsed: Vec<Passport> = passports.into_iter().map(Passport::new).collect();
    let mut valid: i32 = 0;
    for passport in parsed.iter() {
        if passport.clone().is_valid() {
            valid += 1;
        }
    }
    valid
}
// Counts passports that also pass per-field validation (part two rules).
fn part_two_solution(passports: Vec<Vec<String>>) -> i32 {
    let parsed: Vec<Passport> = passports
        .iter()
        .map(|fields| Passport::new(fields.to_vec()))
        .collect();
    let mut valid: i32 = 0;
    for passport in parsed.iter() {
        if passport_is_valid(passport) {
            valid += 1;
        }
    }
    valid
}
// Thin wrapper over the part-two field validator.
fn passport_is_valid(passport: &Passport) -> bool {
    validate_passport(passport)
}
// Loads the puzzle input that `include_str!` compiled into the binary.
fn read_input() -> Result<Vec<Vec<String>>, ioError> {
    let contents = include_str!("puzzle_data");
    process_input(contents)
}
// Splits raw input into passports: blank lines separate passports; within a
// passport, fields are separated by single newlines or spaces.
fn process_input(contents: &str) -> Result<Vec<Vec<String>>, ioError> {
    let passports: Vec<Vec<String>> = contents
        .split("\n\n")
        .map(|chunk| {
            chunk
                .split(['\n', ' '].as_ref())
                .map(String::from)
                .collect()
        })
        .collect();
    Ok(passports)
}
#[cfg(test)]
mod tests {
use super::*;
fn test_data() -> Vec<Vec<String>> {
let contents = include_str!("example_data");
process_input(contents).unwrap()
}
fn part_two_test_data() -> Vec<Vec<String>> {
let contents = include_str!("part_two_example_data");
process_input(contents).unwrap()
}
#[test]
fn test_part_one_example_data() {
let example_data = test_data();
assert_eq!(part_one_solution(example_data), 2);
}
#[test]
fn test_part_one_solution() {
assert_eq!(part_one_solution(read_input().unwrap()), 190);
}
#[test]
fn test_part_two_valid_passports() {
let example_data = part_two_test_data();
assert_eq!(part_two_solution(example_data), 4);
}
#[test]
fn test_part_two_solution() {
assert_eq!(part_two_solution(read_input().unwrap()), 121)
}
}
| 24.261682 | 71 | 0.603236 |
f9daf285998be72c56856907087f790c612863c4 | 6,120 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use crate::{AvroResult, Error};
use serde_json::{Map, Value};
use std::{convert::TryFrom, i64, io::Read, sync::Once};
/// Maximum number of bytes that can be allocated when decoding
/// Avro-encoded values. This is a protection against ill-formed
/// data, whose length field might be interpreted as enormous.
/// See max_allocation_bytes to change this limit.
pub static mut MAX_ALLOCATION_BYTES: usize = 512 * 1024 * 1024;
// Guards the one-time initialization of `MAX_ALLOCATION_BYTES` performed in
// `max_allocation_bytes`.
static MAX_ALLOCATION_BYTES_ONCE: Once = Once::new();
/// Convenience accessors for string-valued fields of a JSON map.
pub trait MapHelper {
    /// Returns the value under `key` if it is present and a JSON string.
    fn string(&self, key: &str) -> Option<String>;
    /// Shorthand for the `"name"` field.
    fn name(&self) -> Option<String> {
        self.string("name")
    }
    /// Shorthand for the `"doc"` field.
    fn doc(&self) -> Option<String> {
        self.string("doc")
    }
}
impl MapHelper for Map<String, Value> {
    fn string(&self, key: &str) -> Option<String> {
        // Non-string values (numbers, arrays, ...) yield `None`.
        self.get(key)
            .and_then(|v| v.as_str().map(|s| s.to_owned()))
    }
}
/// Reads a zig-zag, variable-length encoded Avro `long` from `reader`.
pub fn read_long<R: Read>(reader: &mut R) -> AvroResult<i64> {
    zag_i64(reader)
}
/// Zig-zag encodes an `i32` into `buffer` (via the 64-bit implementation).
pub fn zig_i32(n: i32, buffer: &mut Vec<u8>) {
    zig_i64(i64::from(n), buffer)
}
/// Zig-zag encodes an `i64` into `buffer` as a variable-length integer.
pub fn zig_i64(n: i64, buffer: &mut Vec<u8>) {
    // Zig-zag interleaves negatives and positives so that values of small
    // magnitude map to small unsigned values.
    let zigzag = ((n << 1) ^ (n >> 63)) as u64;
    encode_variable(zigzag, buffer)
}
/// Reads a zig-zag encoded integer, failing if it does not fit in `i32`.
pub fn zag_i32<R: Read>(reader: &mut R) -> AvroResult<i32> {
    let i = zag_i64(reader)?;
    i32::try_from(i).map_err(|e| Error::ZagI32(e, i))
}
/// Reads a variable-length integer and undoes the zig-zag transform.
pub fn zag_i64<R: Read>(reader: &mut R) -> AvroResult<i64> {
    let z = decode_variable(reader)?;
    // Even values decode to non-negative numbers, odd values to negative.
    let decoded = if z & 0x1 == 0 {
        (z >> 1) as i64
    } else {
        !(z >> 1) as i64
    };
    Ok(decoded)
}
// Appends `z` to `buffer` as a variable-length integer: 7 payload bits per
// byte, least-significant group first, high bit set on every byte except the
// last.
fn encode_variable(mut z: u64, buffer: &mut Vec<u8>) {
    while z > 0x7F {
        buffer.push((0x80 | (z & 0x7F)) as u8);
        z >>= 7;
    }
    buffer.push((z & 0x7F) as u8);
}
// Reads a variable-length unsigned integer: 7 payload bits per byte,
// least-significant group first; a clear high bit marks the final byte.
fn decode_variable<R: Read>(reader: &mut R) -> AvroResult<u64> {
    let mut i = 0u64;
    let mut buf = [0u8; 1];
    let mut j = 0;
    loop {
        // More than 10 groups of 7 bits cannot fit in a u64.
        if j > 9 {
            // if j * 7 > 64
            return Err(Error::IntegerOverflow);
        }
        reader
            .read_exact(&mut buf[..])
            .map_err(Error::ReadVariableIntegerBytes)?;
        i |= (u64::from(buf[0] & 0x7F)) << (j * 7);
        if (buf[0] >> 7) == 0 {
            break;
        } else {
            j += 1;
        }
    }
    Ok(i)
}
/// Set a new maximum number of bytes that can be allocated when decoding data.
/// Once called, the limit cannot be changed.
///
/// **NOTE** This function must be called before decoding **any** data. The
/// library leverages [`std::sync::Once`](https://doc.rust-lang.org/std/sync/struct.Once.html)
/// to set the limit either when calling this method, or when decoding for
/// the first time.
pub fn max_allocation_bytes(num_bytes: usize) -> usize {
    unsafe {
        // The write is guarded by `call_once`, so it happens at most once.
        // NOTE(review): the read below is of a `static mut` and relies on all
        // accesses going through this function — confirm no other code touches
        // MAX_ALLOCATION_BYTES directly.
        MAX_ALLOCATION_BYTES_ONCE.call_once(|| {
            MAX_ALLOCATION_BYTES = num_bytes;
        });
        MAX_ALLOCATION_BYTES
    }
}
pub fn safe_len(len: usize) -> AvroResult<usize> {
let max_bytes = max_allocation_bytes(512 * 1024 * 1024);
if len <= max_bytes {
Ok(len)
} else {
Err(Error::MemoryAllocation {
desired: len,
maximum: max_bytes,
})
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // i32 and i64 zig-zag encodings must agree for values in i32 range.
    #[test]
    fn test_zigzag() {
        let mut a = Vec::new();
        let mut b = Vec::new();
        zig_i32(42i32, &mut a);
        zig_i64(42i64, &mut b);
        assert_eq!(a, b);
    }
    // Byte-level expectations around i32/i64 boundary values.
    #[test]
    fn test_zig_i64() {
        let mut s = Vec::new();
        zig_i64(std::i32::MAX as i64, &mut s);
        assert_eq!(s, [254, 255, 255, 255, 15]);
        s.clear();
        zig_i64(std::i32::MAX as i64 + 1, &mut s);
        assert_eq!(s, [128, 128, 128, 128, 16]);
        s.clear();
        zig_i64(std::i32::MIN as i64, &mut s);
        assert_eq!(s, [255, 255, 255, 255, 15]);
        s.clear();
        zig_i64(std::i32::MIN as i64 - 1, &mut s);
        assert_eq!(s, [129, 128, 128, 128, 16]);
        s.clear();
        zig_i64(i64::MAX, &mut s);
        assert_eq!(s, [254, 255, 255, 255, 255, 255, 255, 255, 255, 1]);
        s.clear();
        zig_i64(i64::MIN, &mut s);
        assert_eq!(s, [255, 255, 255, 255, 255, 255, 255, 255, 255, 1]);
    }
    #[test]
    fn test_zig_i32() {
        let mut s = Vec::new();
        zig_i32(std::i32::MAX / 2, &mut s);
        assert_eq!(s, [254, 255, 255, 255, 7]);
        s.clear();
        zig_i32(std::i32::MIN / 2, &mut s);
        assert_eq!(s, [255, 255, 255, 255, 7]);
        s.clear();
        zig_i32(-(std::i32::MIN / 2), &mut s);
        assert_eq!(s, [128, 128, 128, 128, 8]);
        s.clear();
        zig_i32(std::i32::MIN / 2 - 1, &mut s);
        assert_eq!(s, [129, 128, 128, 128, 8]);
        s.clear();
        zig_i32(std::i32::MAX, &mut s);
        assert_eq!(s, [254, 255, 255, 255, 15]);
        s.clear();
        zig_i32(std::i32::MIN, &mut s);
        assert_eq!(s, [255, 255, 255, 255, 15]);
    }
    // A malformed continuation sequence must error instead of overflowing.
    #[test]
    fn test_overflow() {
        let causes_left_shift_overflow: &[u8] = &[0xe1, 0xe1, 0xe1, 0xe1, 0xe1];
        assert!(decode_variable(&mut &*causes_left_shift_overflow).is_err());
    }
    #[test]
    fn test_safe_len() {
        assert_eq!(42usize, safe_len(42usize).unwrap());
        assert!(safe_len(1024 * 1024 * 1024).is_err());
    }
}
// MIT License
//
// Copyright (c) 2020 Ankur Srivastava
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use serde::{Deserialize, Serialize};
/// Response wrapper carrying a single [`Credit`].
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct ResCredit {
	pub credit: Credit,
}
/// A single billing credit entry.
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct Credit {
	/// Credit code (identifier) as returned by the API.
	pub code: String,
	/// Remaining value of the credit, serialized as a string.
	pub remaining_value: String,
	/// Allowed values: "discount", "employee", "evaluation", "internal",
	/// "other", "outage", "purchase", "sponsorship", "trial", "trial_over"
	#[serde(rename = "type")]
	pub credit_type: String,
}
/// A single invoice. All amounts are serialized as strings by the API.
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct Invoice {
	/// Currency code of the invoice amounts.
	pub currency: String,
	/// Token used when downloading this invoice — presumably passed as a
	/// cookie; confirm against the client code that fetches invoices.
	pub download_cookie: String,
	pub invoice_number: String,
	/// Start of the billing period.
	pub period_begin: String,
	/// End of the billing period.
	pub period_end: String,
	/// Invoice state, as reported by the API.
	pub state: String,
	/// Total including VAT.
	pub total_inc_vat: String,
	/// Total with zero VAT.
	pub total_vat_zero: String,
}
/// List of [`Invoice`]s, as returned by the API.
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct Invoices {
	pub invoices: Vec<Invoice>,
}
/// List of [`Credit`]s, as returned by the API.
#[derive(Deserialize, Serialize, Debug, Default)]
pub struct Credits {
	pub credits: Vec<Credit>,
}
| 34.032787 | 80 | 0.745183 |
//! A custom kubelet backend that can run [WASI](https://wasi.dev/) based workloads
//!
//! The crate provides the [`WasiProvider`] type which can be used
//! as a provider with [`kubelet`].
//!
//! # Example
//! ```rust,no_run
//! use kubelet::{Kubelet, config::Config};
//! use kubelet::store::oci::FileStore;
//! use std::sync::Arc;
//! use wasi_provider::WasiProvider;
//!
//! async {
//! // Get a configuration for the Kubelet
//! let kubelet_config = Config::default();
//! let client = oci_distribution::Client::default();
//! let store = Arc::new(FileStore::new(client, &std::path::PathBuf::from("")));
//!
//! // Load a kubernetes configuration
//! let kubeconfig = kube::Config::infer().await.unwrap();
//! let plugin_registry = Arc::new(Default::default());
//!
//! // Instantiate the provider type
//! let provider = WasiProvider::new(store, &kubelet_config, kubeconfig.clone(), plugin_registry).await.unwrap();
//!
//! // Instantiate the Kubelet
//! let kubelet = Kubelet::new(provider, kubeconfig, kubelet_config).await.unwrap();
//! // Start the Kubelet and block on it
//! kubelet.start().await.unwrap();
//! };
//! ```
#![deny(missing_docs)]
mod wasi_runtime;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use kubelet::node::Builder;
use kubelet::plugin_watcher::PluginRegistry;
use kubelet::pod::state::prelude::SharedState;
use kubelet::pod::{Handle, Pod, PodKey};
use kubelet::provider::{Provider, ProviderError};
use kubelet::state::common::registered::Registered;
use kubelet::state::common::terminated::Terminated;
use kubelet::state::common::{GenericProvider, GenericProviderState};
use kubelet::store::Store;
use kubelet::volume::Ref;
use tokio::sync::RwLock;
use wasi_runtime::Runtime;
mod states;
use states::pod::PodState;
/// Architecture string this provider advertises (used as `Provider::ARCH`
/// and as the value of the `kubernetes.io/arch` taints).
const TARGET_WASM32_WASI: &str = "wasm32-wasi";
/// Subdirectory of the kubelet data dir where container logs are written.
const LOG_DIR_NAME: &str = "wasi-logs";
/// Subdirectory of the kubelet data dir where pod volumes are materialized.
const VOLUME_DIR: &str = "volumes";
/// WasiProvider provides a Kubelet runtime implementation that executes WASM
/// binaries conforming to the WASI spec.
#[derive(Clone)]
pub struct WasiProvider {
    /// Provider-level state shared between all pods.
    shared: ProviderState,
}
/// Map from pod key to the handle of its running workload.
type PodHandleMap = Arc<RwLock<HashMap<PodKey, Arc<Handle<Runtime, wasi_runtime::HandleFactory>>>>>;
/// Provider-level state shared between all pods
#[derive(Clone)]
pub struct ProviderState {
    /// Handles of currently tracked pods.
    handles: PodHandleMap,
    /// Store from which module images are fetched.
    store: Arc<dyn Store + Sync + Send>,
    /// Directory where per-container logs are written.
    log_path: PathBuf,
    /// Kubernetes API client.
    client: kube::Client,
    /// Directory where pod volumes are materialized.
    volume_path: PathBuf,
    /// Plugin registry handed back through `plugin_registry()`.
    plugin_registry: Arc<PluginRegistry>,
}
#[async_trait]
impl GenericProviderState for ProviderState {
    /// A fresh handle to the Kubernetes API client.
    fn client(&self) -> kube::client::Client {
        self.client.clone()
    }
    /// Shared reference to the module store.
    fn store(&self) -> std::sync::Arc<(dyn Store + Send + Sync + 'static)> {
        Arc::clone(&self.store)
    }
    /// Directory under which pod volumes are materialized.
    fn volume_path(&self) -> PathBuf {
        self.volume_path.to_path_buf()
    }
    /// This provider always exposes a plugin registry.
    fn plugin_registry(&self) -> Option<Arc<PluginRegistry>> {
        Some(Arc::clone(&self.plugin_registry))
    }
    /// Stop the pod's runtime handle if one is tracked; stopping a pod we
    /// do not know about is a no-op.
    async fn stop(&self, pod: &Pod) -> anyhow::Result<()> {
        let key = PodKey::from(pod);
        let mut tracked = self.handles.write().await;
        match tracked.get_mut(&key) {
            Some(handle) => handle.stop().await,
            None => Ok(()),
        }
    }
}
impl WasiProvider {
    /// Create a new wasi provider from a module store and a kubelet config
    ///
    /// Ensures the log and volume directories exist under the kubelet data
    /// dir, then builds the shared provider state around the given store,
    /// Kubernetes client and plugin registry.
    pub async fn new(
        store: Arc<dyn Store + Sync + Send>,
        config: &kubelet::config::Config,
        kubeconfig: kube::Config,
        plugin_registry: Arc<PluginRegistry>,
    ) -> anyhow::Result<Self> {
        let log_path = config.data_dir.join(LOG_DIR_NAME);
        let volume_path = config.data_dir.join(VOLUME_DIR);
        // Both directories must exist before any pod is scheduled.
        tokio::fs::create_dir_all(&log_path).await?;
        tokio::fs::create_dir_all(&volume_path).await?;
        let client = kube::Client::try_from(kubeconfig)?;
        let shared = ProviderState {
            handles: Default::default(),
            store,
            log_path,
            volume_path,
            client,
            plugin_registry,
        };
        Ok(Self { shared })
    }
}
/// Data gathered before starting a pod's workloads: the fetched module
/// bytes and the resolved volume references. Maps appear to be keyed by
/// container name and volume name respectively — TODO confirm against the
/// states code that populates them.
struct ModuleRunContext {
    modules: HashMap<String, Vec<u8>>,
    volumes: HashMap<String, Ref>,
}
#[async_trait::async_trait]
impl Provider for WasiProvider {
    type ProviderState = ProviderState;
    type InitialState = Registered<Self>;
    type TerminatedState = Terminated<Self>;
    type PodState = PodState;
    const ARCH: &'static str = TARGET_WASM32_WASI;
    /// A snapshot of the provider-wide shared state, wrapped for the kubelet.
    fn provider_state(&self) -> SharedState<ProviderState> {
        Arc::new(RwLock::new(self.shared.clone()))
    }
    /// Advertise the node architecture and taint the node for it.
    async fn node(&self, builder: &mut Builder) -> anyhow::Result<()> {
        builder.set_architecture("wasm-wasi");
        for &effect in &["NoSchedule", "NoExecute"] {
            builder.add_taint(effect, "kubernetes.io/arch", Self::ARCH);
        }
        Ok(())
    }
    /// Fresh per-pod state.
    async fn initialize_pod_state(&self, pod: &Pod) -> anyhow::Result<Self::PodState> {
        Ok(PodState::new(pod))
    }
    /// Stream a container's logs into `sender`; errors if the pod is unknown.
    async fn logs(
        &self,
        namespace: String,
        pod_name: String,
        container_name: String,
        sender: kubelet::log::Sender,
    ) -> anyhow::Result<()> {
        let key = PodKey::new(&namespace, &pod_name);
        let mut handles = self.shared.handles.write().await;
        let handle = match handles.get_mut(&key) {
            Some(handle) => handle,
            None => return Err(ProviderError::PodNotFound { pod_name }.into()),
        };
        handle.output(&container_name, sender).await
    }
    fn plugin_registry(&self) -> Option<Arc<PluginRegistry>> {
        Some(Arc::clone(&self.shared.plugin_registry))
    }
    fn volume_path(&self) -> Option<PathBuf> {
        Some(self.shared.volume_path())
    }
}
impl GenericProvider for WasiProvider {
    type ProviderState = ProviderState;
    type PodState = PodState;
    type RunState = crate::states::pod::initializing::Initializing;
    /// Pods are always runnable at the pod level; per-container checks below.
    fn validate_pod_runnable(_pod: &Pod) -> anyhow::Result<()> {
        Ok(())
    }
    /// Reject containers whose image is kube-proxy; everything else passes.
    fn validate_container_runnable(
        container: &kubelet::container::Container,
    ) -> anyhow::Result<()> {
        match container.image()? {
            Some(image) if image.whole().starts_with("k8s.gcr.io/kube-proxy") => {
                Err(anyhow::anyhow!("Cannot run kube-proxy"))
            }
            _ => Ok(()),
        }
    }
}
| 31.100962 | 117 | 0.628845 |
use crate::{smt_store_impl::SMTStore, traits::KVStore};
use gw_common::{smt::SMT, CKB_SUDT_SCRIPT_ARGS, H256};
use gw_db::schema::{
Col, COLUMN_BLOCK, COLUMN_BLOCK_DEPOSITION_REQUESTS, COLUMN_BLOCK_GLOBAL_STATE,
COLUMN_BLOCK_SMT_BRANCH, COLUMN_BLOCK_SMT_LEAF, COLUMN_BLOCK_STATE_RECORD,
COLUMN_CUSTODIAN_ASSETS, COLUMN_INDEX, COLUMN_L2BLOCK_COMMITTED_INFO, COLUMN_META,
COLUMN_TRANSACTION, COLUMN_TRANSACTION_INFO, COLUMN_TRANSACTION_RECEIPT,
META_ACCOUNT_SMT_COUNT_KEY, META_ACCOUNT_SMT_ROOT_KEY, META_BLOCK_SMT_ROOT_KEY,
META_CHAIN_ID_KEY, META_TIP_BLOCK_HASH_KEY,
};
use gw_db::{
error::Error, iter::DBIter, DBIterator, Direction::Forward, IteratorMode, RocksDBTransaction,
};
use gw_types::{
packed::{self, TransactionKey},
prelude::*,
};
use std::{borrow::BorrowMut, collections::HashMap};
/// Blocks older than this many confirmations have their per-block state
/// records pruned (see `prune_block_state_record`).
const NUMBER_OF_CONFIRMATION: u64 = 100;
/// Wrapper around a RocksDB transaction; reads and staged writes go through
/// the inner transactional handle and are persisted by `commit`.
pub struct StoreTransaction {
    pub(crate) inner: RocksDBTransaction,
}
impl KVStore for StoreTransaction {
    /// Read a value from a column; panics if the underlying DB read fails.
    fn get(&self, col: Col, key: &[u8]) -> Option<Box<[u8]>> {
        let value = self.inner.get(col, key).expect("db operation should be ok");
        value.map(|v| v.as_ref().into())
    }
    /// Iterate a column in the given mode; panics on DB failure.
    fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter {
        self.inner.iter(col, mode).expect("db operation should be ok")
    }
    /// Stage a key/value write into the transaction.
    fn insert_raw(&self, col: Col, key: &[u8], value: &[u8]) -> Result<(), Error> {
        self.inner.put(col, key, value)
    }
    /// Stage a deletion into the transaction.
    fn delete(&self, col: Col, key: &[u8]) -> Result<(), Error> {
        self.inner.delete(col, key)
    }
}
impl StoreTransaction {
    /// Persist all staged writes of this transaction to the database.
    pub fn commit(&self) -> Result<(), Error> {
        self.inner.commit()
    }
    /// Record the chain id under the meta column.
    pub fn setup_chain_id(&self, chain_id: H256) -> Result<(), Error> {
        self.insert_raw(COLUMN_META, META_CHAIN_ID_KEY, chain_id.as_slice())?;
        Ok(())
    }
    /// Load the current block-SMT root; panics if it was never initialized.
    pub fn get_block_smt_root(&self) -> Result<H256, Error> {
        let slice = self
            .get(COLUMN_META, META_BLOCK_SMT_ROOT_KEY)
            .expect("must has root")
        debug_assert_eq!(slice.len(), 32);
        let mut root = [0u8; 32];
        root.copy_from_slice(&slice);
        Ok(root.into())
    }
    /// Store the block-SMT root under the meta column.
    pub fn set_block_smt_root(&self, root: H256) -> Result<(), Error> {
        self.insert_raw(COLUMN_META, META_BLOCK_SMT_ROOT_KEY, root.as_slice())?;
        Ok(())
    }
    /// Build an SMT view over the block tree, backed by this transaction.
    pub fn block_smt(&self) -> Result<SMT<SMTStore<'_, Self>>, Error> {
        let root = self.get_block_smt_root()?;
        let smt_store = SMTStore::new(COLUMN_BLOCK_SMT_LEAF, COLUMN_BLOCK_SMT_BRANCH, self);
        Ok(SMT::new(root, smt_store))
    }
    /// Load the current account-SMT root; panics if it was never initialized.
    pub fn get_account_smt_root(&self) -> Result<H256, Error> {
        let slice = self
            .get(COLUMN_META, META_ACCOUNT_SMT_ROOT_KEY)
            .expect("must has root");
        debug_assert_eq!(slice.len(), 32);
        let mut root = [0u8; 32];
        root.copy_from_slice(&slice);
        Ok(root.into())
    }
    /// Store the account-SMT root under the meta column.
    pub fn set_account_smt_root(&self, root: H256) -> Result<(), Error> {
        self.insert_raw(COLUMN_META, META_ACCOUNT_SMT_ROOT_KEY, root.as_slice())?;
        Ok(())
    }
    /// Store the account count.
    pub fn set_account_count(&self, count: u32) -> Result<(), Error> {
        let count: packed::Uint32 = count.pack();
        // NOTE(review): panics on DB failure instead of propagating via `?`,
        // unlike the other setters in this impl.
        self.insert_raw(COLUMN_META, META_ACCOUNT_SMT_COUNT_KEY, count.as_slice())
            .expect("insert");
        Ok(())
    }
    /// Load the account count; panics if it was never set.
    pub fn get_account_count(&self) -> Result<u32, Error> {
        let slice = self
            .get(COLUMN_META, META_ACCOUNT_SMT_COUNT_KEY)
            .expect("account count");
        let count = packed::Uint32Reader::from_slice_should_be_ok(&slice.as_ref()).to_entity();
        Ok(count.unpack())
    }
    /// Load the tip block hash; panics if no tip has been recorded.
    pub fn get_tip_block_hash(&self) -> Result<H256, Error> {
        let slice = self
            .get(COLUMN_META, META_TIP_BLOCK_HASH_KEY)
            .expect("get tip block hash")
        Ok(
            packed::Byte32Reader::from_slice_should_be_ok(&slice.as_ref())
                .to_entity()
                .unpack(),
        )
    }
    /// Load the tip block itself; panics if the tip hash has no block.
    pub fn get_tip_block(&self) -> Result<packed::L2Block, Error> {
        let tip_block_hash = self.get_tip_block_hash()?;
        Ok(self.get_block(&tip_block_hash)?.expect("get tip block"))
    }
    /// Look up a main-chain block hash by block number (via COLUMN_INDEX).
    pub fn get_block_hash_by_number(&self, number: u64) -> Result<Option<H256>, Error> {
        let block_number: packed::Uint64 = number.pack();
        match self.get(COLUMN_INDEX, block_number.as_slice()) {
            Some(slice) => Ok(Some(
                packed::Byte32Reader::from_slice_should_be_ok(&slice.as_ref())
                    .to_entity()
                    .unpack(),
            )),
            None => Ok(None),
        }
    }
    /// Look up a main-chain block number by block hash (via COLUMN_INDEX).
    pub fn get_block_number(&self, block_hash: &H256) -> Result<Option<u64>, Error> {
        match self.get(COLUMN_INDEX, block_hash.as_slice()) {
            Some(slice) => Ok(Some(
                packed::Uint64Reader::from_slice_should_be_ok(&slice.as_ref())
                    .to_entity()
                    .unpack(),
            )),
            None => Ok(None),
        }
    }
    /// Load a block by hash.
    pub fn get_block(&self, block_hash: &H256) -> Result<Option<packed::L2Block>, Error> {
        match self.get(COLUMN_BLOCK, block_hash.as_slice()) {
            Some(slice) => Ok(Some(
                packed::L2BlockReader::from_slice_should_be_ok(&slice.as_ref()).to_entity(),
            )),
            None => Ok(None),
        }
    }
    /// Load a transaction by its hash: resolves the `TransactionInfo`
    /// record first, then fetches the transaction under `info.key()`.
    /// The key scheme used when writing COLUMN_TRANSACTION must therefore
    /// match the keys stored in the info records.
    pub fn get_transaction(&self, tx_hash: &H256) -> Result<Option<packed::L2Transaction>, Error> {
        if let Some(slice) = self.get(COLUMN_TRANSACTION_INFO, tx_hash.as_slice()) {
            let info =
                packed::TransactionInfoReader::from_slice_should_be_ok(&slice.as_ref()).to_entity();
            let tx_key = info.key();
            Ok(self
                .get(COLUMN_TRANSACTION, &tx_key.as_slice())
                .map(|slice| {
                    packed::L2TransactionReader::from_slice_should_be_ok(&slice.as_ref())
                        .to_entity()
                }))
        } else {
            Ok(None)
        }
    }
    /// Load a transaction receipt by the transaction's hash, resolving
    /// through the `TransactionInfo` record like `get_transaction`.
    pub fn get_transaction_receipt(
        &self,
        tx_hash: &H256,
    ) -> Result<Option<packed::TxReceipt>, Error> {
        if let Some(slice) = self.get(COLUMN_TRANSACTION_INFO, tx_hash.as_slice()) {
            let info =
                packed::TransactionInfoReader::from_slice_should_be_ok(&slice.as_ref()).to_entity();
            let tx_key = info.key();
            self.get_transaction_receipt_by_key(&tx_key)
        } else {
            Ok(None)
        }
    }
    /// Load a transaction receipt directly by its composite key.
    pub fn get_transaction_receipt_by_key(
        &self,
        key: &TransactionKey,
    ) -> Result<Option<packed::TxReceipt>, Error> {
        Ok(self
            .get(COLUMN_TRANSACTION_RECEIPT, &key.as_slice())
            .map(|slice| {
                packed::TxReceiptReader::from_slice_should_be_ok(&slice.as_ref()).to_entity()
            }))
    }
    /// Load the L1 commitment info recorded for a block.
    pub fn get_l2block_committed_info(
        &self,
        block_hash: &H256,
    ) -> Result<Option<packed::L2BlockCommittedInfo>, Error> {
        match self.get(COLUMN_L2BLOCK_COMMITTED_INFO, block_hash.as_slice()) {
            Some(slice) => Ok(Some(
                packed::L2BlockCommittedInfoReader::from_slice_should_be_ok(&slice.as_ref())
                    .to_entity(),
            )),
            None => Ok(None),
        }
    }
    /// Load the deposition requests stored alongside a block.
    pub fn get_block_deposition_requests(
        &self,
        block_hash: &H256,
    ) -> Result<Option<Vec<packed::DepositionRequest>>, Error> {
        match self.get(COLUMN_BLOCK_DEPOSITION_REQUESTS, block_hash.as_slice()) {
            Some(slice) => Ok(Some(
                packed::DepositionRequestVecReader::from_slice_should_be_ok(&slice.as_ref())
                    .to_entity()
                    .into_iter()
                    .collect(),
            )),
            None => Ok(None),
        }
    }
    /// Load the post global state recorded for a block.
    pub fn get_block_post_global_state(
        &self,
        block_hash: &H256,
    ) -> Result<Option<packed::GlobalState>, Error> {
        match self.get(COLUMN_BLOCK_GLOBAL_STATE, block_hash.as_slice()) {
            Some(slice) => Ok(Some(
                packed::GlobalStateReader::from_slice_should_be_ok(&slice.as_ref()).to_entity(),
            )),
            None => Ok(None),
        }
    }
    /// key: sudt_script_hash
    ///
    /// Persist the tracked custodian balance for one sUDT type,
    /// little-endian encoded.
    fn set_custodian_asset(&self, key: H256, value: u128) -> Result<(), Error> {
        self.insert_raw(
            COLUMN_CUSTODIAN_ASSETS,
            key.as_slice(),
            &value.to_le_bytes(),
        )
    }
    /// key: sudt_script_hash
    ///
    /// Read the tracked custodian balance; a missing entry counts as zero.
    pub fn get_custodian_asset(&self, key: H256) -> Result<u128, Error> {
        match self.get(COLUMN_CUSTODIAN_ASSETS, key.as_slice()) {
            Some(slice) => {
                let mut buf = [0u8; 16];
                buf.copy_from_slice(&slice);
                Ok(u128::from_le_bytes(buf))
            }
            None => Ok(0),
        }
    }
pub fn insert_block(
&self,
block: packed::L2Block,
committed_info: packed::L2BlockCommittedInfo,
global_state: packed::GlobalState,
tx_receipts: Vec<packed::TxReceipt>,
deposition_requests: Vec<packed::DepositionRequest>,
) -> Result<(), Error> {
debug_assert_eq!(block.transactions().len(), tx_receipts.len());
let block_hash = block.hash();
self.insert_raw(COLUMN_BLOCK, &block_hash, block.as_slice())?;
self.insert_raw(
COLUMN_L2BLOCK_COMMITTED_INFO,
&block_hash,
committed_info.as_slice(),
)?;
self.insert_raw(
COLUMN_BLOCK_GLOBAL_STATE,
&block_hash,
global_state.as_slice(),
)?;
let deposition_requests_vec: packed::DepositionRequestVec = deposition_requests.pack();
self.insert_raw(
COLUMN_BLOCK_DEPOSITION_REQUESTS,
&block_hash,
deposition_requests_vec.as_slice(),
)?;
for (index, (tx, tx_receipt)) in block
.transactions()
.into_iter()
.zip(tx_receipts)
.enumerate()
{
let key = TransactionKey::build_transaction_key(tx.hash().pack(), index as u32);
self.insert_raw(COLUMN_TRANSACTION, &key.as_slice(), tx.as_slice())?;
self.insert_raw(
COLUMN_TRANSACTION_RECEIPT,
&key.as_slice(),
tx_receipt.as_slice(),
)?;
}
Ok(())
}
    /// Update custodian assets
    ///
    /// Applies `addition` (deposits) and `removed` (withdrawals) to the
    /// tracked custodian balances: CKB capacity is accumulated under the
    /// `CKB_SUDT_SCRIPT_ARGS` pseudo-hash and each sUDT amount under its own
    /// script hash. Balances are read once into a scratch map, mutated, then
    /// written back. Panics on arithmetic overflow/underflow.
    fn update_custodian_assets<
        AddIter: Iterator<Item = CustodianChange>,
        RemIter: Iterator<Item = CustodianChange>,
    >(
        &self,
        addition: AddIter,
        removed: RemIter,
    ) -> Result<(), Error> {
        let mut touched_custodian_assets: HashMap<H256, u128> = Default::default();
        for request in addition {
            let CustodianChange {
                sudt_script_hash,
                amount,
                capacity,
            } = request;
            // update ckb balance
            let ckb_balance = touched_custodian_assets
                .entry(CKB_SUDT_SCRIPT_ARGS.into())
                .or_insert_with(|| {
                    self.get_custodian_asset(CKB_SUDT_SCRIPT_ARGS.into())
                        .expect("get custodian asset")
                })
                // NOTE(review): `.borrow_mut()` on an `&mut u128` is a no-op
                // (BorrowMut<u128> for u128); kept as-is.
                .borrow_mut();
            *ckb_balance = ckb_balance
                .checked_add(capacity as u128)
                .expect("deposit overflow");
            // update sUDT balance
            let balance = touched_custodian_assets
                .entry(sudt_script_hash)
                .or_insert_with(|| {
                    self.get_custodian_asset(sudt_script_hash)
                        .expect("get custodian asset")
                })
                .borrow_mut();
            *balance = balance.checked_add(amount).expect("deposit overflow");
        }
        for request in removed {
            let CustodianChange {
                sudt_script_hash,
                amount,
                capacity,
            } = request;
            // update ckb balance
            let ckb_balance = touched_custodian_assets
                .entry(CKB_SUDT_SCRIPT_ARGS.into())
                .or_insert_with(|| {
                    self.get_custodian_asset(CKB_SUDT_SCRIPT_ARGS.into())
                        .expect("get custodian asset")
                })
                .borrow_mut();
            *ckb_balance = ckb_balance
                .checked_sub(capacity as u128)
                .expect("withdrawal overflow");
            // update sUDT balance
            let balance = touched_custodian_assets
                .entry(sudt_script_hash)
                .or_insert_with(|| {
                    self.get_custodian_asset(sudt_script_hash)
                        .expect("get custodian asset")
                })
                .borrow_mut();
            *balance = balance.checked_sub(amount).expect("withdrawal overflow");
        }
        // write touched assets to storage
        for (key, balance) in touched_custodian_assets {
            self.set_custodian_asset(key, balance)?;
        }
        Ok(())
    }
    /// Attach block to the rollup main chain
    ///
    /// Writes per-tx `TransactionInfo` records keyed by tx hash (whose
    /// `key()` is `(block_hash, index)` — the scheme the transaction/receipt
    /// columns must be written with), applies deposit/withdrawal asset
    /// changes, indexes number<->hash, updates the block SMT and the tip,
    /// and prunes state records older than the confirmation window.
    pub fn attach_block(&self, block: packed::L2Block) -> Result<(), Error> {
        let raw = block.raw();
        let raw_number = raw.number();
        let block_hash = raw.hash();
        // build tx info
        for (index, tx) in block.transactions().into_iter().enumerate() {
            let key = TransactionKey::build_transaction_key(block_hash.pack(), index as u32);
            let info = packed::TransactionInfo::new_builder()
                .key(key)
                .block_number(raw_number.clone())
                .build();
            let tx_hash = tx.hash();
            self.insert_raw(COLUMN_TRANSACTION_INFO, &tx_hash, info.as_slice())?;
        }
        // update custodian assets
        let deposit_assets = self
            .get_block_deposition_requests(&block_hash.into())?
            .expect("deposits")
            .into_iter()
            .map(|deposit| CustodianChange {
                sudt_script_hash: deposit.sudt_script_hash().unpack(),
                amount: deposit.amount().unpack(),
                capacity: deposit.capacity().unpack(),
            });
        let withdrawal_assets = block.withdrawals().into_iter().map(|withdrawal| {
            let raw = withdrawal.raw();
            CustodianChange {
                sudt_script_hash: raw.sudt_script_hash().unpack(),
                amount: raw.amount().unpack(),
                capacity: raw.capacity().unpack(),
            }
        });
        self.update_custodian_assets(deposit_assets, withdrawal_assets)?;
        // build main chain index
        self.insert_raw(COLUMN_INDEX, raw_number.as_slice(), &block_hash)?;
        self.insert_raw(COLUMN_INDEX, &block_hash, raw_number.as_slice())?;
        // update block tree
        let mut block_smt = self.block_smt()?;
        block_smt
            .update(raw.smt_key().into(), raw.hash().into())
            .map_err(|err| Error::from(format!("SMT error {}", err)))?;
        let root = block_smt.root();
        self.set_block_smt_root(*root)?;
        // update tip
        self.insert_raw(COLUMN_META, &META_TIP_BLOCK_HASH_KEY, &block_hash)?;
        self.prune_block_state_record(raw_number.unpack())?;
        Ok(())
    }
    /// Detach a block from the main chain, reversing `attach_block`:
    /// removes tx info records, applies asset changes with deposits and
    /// withdrawals swapped, drops the number<->hash index entries, zeroes
    /// the block's SMT leaf, rewinds the tip to the parent block, and
    /// clears the block's recorded state.
    pub fn detach_block(&self, block: &packed::L2Block) -> Result<(), Error> {
        // remove transaction info
        for tx in block.transactions().into_iter() {
            let tx_hash = tx.hash();
            self.delete(COLUMN_TRANSACTION_INFO, &tx_hash)?;
        }
        // update custodian assets
        let deposit_assets = self
            .get_block_deposition_requests(&block.hash().into())?
            .expect("deposits")
            .into_iter()
            .map(|deposit| CustodianChange {
                sudt_script_hash: deposit.sudt_script_hash().unpack(),
                amount: deposit.amount().unpack(),
                capacity: deposit.capacity().unpack(),
            });
        let withdrawal_assets = block.withdrawals().into_iter().map(|withdrawal| {
            let raw = withdrawal.raw();
            CustodianChange {
                sudt_script_hash: raw.sudt_script_hash().unpack(),
                amount: raw.amount().unpack(),
                capacity: raw.capacity().unpack(),
            }
        });
        // arguments swapped relative to attach_block: withdrawals are
        // re-added, deposits removed.
        self.update_custodian_assets(withdrawal_assets, deposit_assets)?;
        let block_number = block.raw().number();
        self.delete(COLUMN_INDEX, block_number.as_slice())?;
        self.delete(COLUMN_INDEX, &block.hash())?;
        // update block tree
        let mut block_smt = self.block_smt()?;
        block_smt
            .update(block.smt_key().into(), H256::zero())
            .map_err(|err| Error::from(format!("SMT error {}", err)))?;
        let root = block_smt.root();
        self.set_block_smt_root(*root)?;
        // update tip
        let block_number: u64 = block_number.unpack();
        let parent_number = block_number.saturating_sub(1);
        let parent_block_hash = self
            .get_block_hash_by_number(parent_number)?
            .expect("parent block hash");
        self.insert_raw(
            COLUMN_META,
            &META_TIP_BLOCK_HASH_KEY,
            parent_block_hash.as_slice(),
        )?;
        self.clear_block_state(block.hash().into())?;
        Ok(())
    }
    /// Record that `raw_key` in column `col` was written while executing
    /// tx `tx_index` of `block_hash`, so the state can be reverted later.
    pub fn record_block_state(
        &self,
        block_hash: &H256,
        tx_index: u32,
        col: Col,
        raw_key: &[u8],
    ) -> Result<(), Error> {
        let record_key = BlockStateRecordKey::new(block_hash, tx_index, col);
        self.insert_raw(COLUMN_BLOCK_STATE_RECORD, record_key.as_slice(), raw_key)
    }
    /// Drop the state records of the block that just fell out of the
    /// confirmation window (current number minus NUMBER_OF_CONFIRMATION + 1).
    fn prune_block_state_record(&self, current_block_number: u64) -> Result<(), Error> {
        if current_block_number <= NUMBER_OF_CONFIRMATION {
            return Ok(());
        }
        let to_be_pruned_block_number = current_block_number - NUMBER_OF_CONFIRMATION - 1;
        let block_hash = self.get_block_hash_by_number(to_be_pruned_block_number)?;
        let block_hash = match block_hash {
            Some(block_hash) => block_hash,
            // genesis may legitimately have no index entry yet
            None if to_be_pruned_block_number == 0 => return Ok(()),
            _ => return Err(Error::from("Invalid block hash".to_owned())),
        };
        self.clear_block_state_record(block_hash)
    }
    /// Delete the state-record index entries of a block (the recorded state
    /// itself is left untouched).
    pub(crate) fn clear_block_state_record(&self, block_hash: H256) -> Result<(), Error> {
        let iter = self.iter_block_state_record(block_hash);
        for (record_key, _) in iter {
            self.delete(COLUMN_BLOCK_STATE_RECORD, record_key.as_slice())?;
        }
        Ok(())
    }
    /// Delete both the recorded state keys of a block and the record
    /// entries pointing at them (used when detaching a block).
    pub(crate) fn clear_block_state(&self, block_hash: H256) -> Result<(), Error> {
        let iter = self.iter_block_state_record(block_hash);
        for (record_key, state_key) in iter {
            let column = record_key.get_column();
            self.delete(column, &state_key)?;
            self.delete(COLUMN_BLOCK_STATE_RECORD, record_key.as_slice())?;
        }
        Ok(())
    }
    /// Iterate all state records belonging to `block_hash`, by scanning
    /// forward from (block_hash, 0, 0) while the key prefix still matches.
    fn iter_block_state_record(
        &self,
        block_hash: H256,
    ) -> impl Iterator<Item = (BlockStateRecordKey, Box<[u8]>)> + '_ {
        let start_key = BlockStateRecordKey::new(&block_hash, 0u32, 0u8);
        self.get_iter(
            COLUMN_BLOCK_STATE_RECORD,
            IteratorMode::From(start_key.as_slice(), Forward),
        )
        .map(|(key, value)| (BlockStateRecordKey::from_slice(&key), value))
        .take_while(move |(key, _)| key.is_same_block(block_hash))
    }
}
/// Net asset movement of one deposit or withdrawal request: CKB capacity
/// plus an sUDT amount identified by its script hash.
struct CustodianChange {
    // CKB capacity moved (unit not visible here — presumably shannons).
    capacity: u64,
    sudt_script_hash: H256,
    amount: u128,
}
/// Composite key for block state records, laid out as
/// block_hash (32 bytes) | tx_index (4 bytes, big-endian) | col (1 byte).
struct BlockStateRecordKey([u8; 37]);
impl BlockStateRecordKey {
    /// Assemble a key from its three components.
    fn new(block_hash: &H256, tx_index: u32, col: Col) -> Self {
        let mut bytes = [0u8; 37];
        bytes[..32].copy_from_slice(block_hash.as_slice());
        bytes[32..36].copy_from_slice(&tx_index.to_be_bytes());
        bytes[36] = col;
        Self(bytes)
    }
    /// Rebuild a key from raw bytes; panics unless exactly 37 bytes long.
    fn from_slice(record_key: &[u8]) -> Self {
        let mut bytes = [0u8; 37];
        bytes.copy_from_slice(record_key);
        Self(bytes)
    }
    /// The column byte (last byte of the key).
    fn get_column(&self) -> u8 {
        self.0[36]
    }
    /// Whether this key's block-hash prefix matches `block_hash`.
    fn is_same_block(&self, block_hash: H256) -> bool {
        self.0[..32] == *block_hash.as_slice()
    }
    /// The full 37-byte key as a slice.
    fn as_slice(&self) -> &[u8] {
        &self.0
    }
}
| 35.501718 | 100 | 0.569645 |
use crate::data::DataMap;
use crate::visit::EdgeCount;
use crate::visit::EdgeRef;
use crate::visit::GetAdjacencyMatrix;
use crate::visit::GraphBase;
use crate::visit::GraphProp;
use crate::visit::IntoEdgesDirected;
use crate::visit::IntoNeighborsDirected;
use crate::visit::NodeCompactIndexable;
use crate::{Incoming, Outgoing};
use self::semantic::EdgeMatcher;
use self::semantic::NoSemanticMatch;
use self::semantic::NodeMatcher;
use self::state::Vf2State;
mod state {
    use super::*;
    #[derive(Debug)]
    // TODO: make mapping generic over the index type of the other graph.
    pub struct Vf2State<'a, G: GetAdjacencyMatrix> {
        /// A reference to the graph this state was built from.
        pub graph: &'a G,
        /// The current mapping M(s) of nodes from G0 → G1 and G1 → G0,
        /// `usize::MAX` for no mapping.
        pub mapping: Vec<usize>,
        /// out[i] is non-zero if i is in either M_0(s) or Tout_0(s)
        /// These are all the next vertices that are not mapped yet, but
        /// have an outgoing edge from the mapping.
        out: Vec<usize>,
        /// ins[i] is non-zero if i is in either M_0(s) or Tin_0(s)
        /// These are all the incoming vertices, those not mapped yet, but
        /// have an edge from them into the mapping.
        /// Unused if graph is undirected -- it's identical with out in that case.
        ins: Vec<usize>,
        // Counts of marked entries in `out` / `ins`.
        pub out_size: usize,
        pub ins_size: usize,
        // Cached adjacency matrix of `graph` for edge queries.
        pub adjacency_matrix: G::AdjMatrix,
        // Number of nodes mapped so far; also used as the "timestamp" stored
        // in `out`/`ins` so `pop_mapping` can undo exactly the marks that
        // the matching `push_mapping` added.
        generation: usize,
    }
    impl<'a, G> Vf2State<'a, G>
    where
        G: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
    {
        /// Create a fresh state for `g` with an empty mapping.
        pub fn new(g: &'a G) -> Self {
            let c0 = g.node_count();
            Vf2State {
                graph: g,
                mapping: vec![std::usize::MAX; c0],
                out: vec![0; c0],
                // `ins` is only allocated for directed graphs
                // (zero-length when undirected).
                ins: vec![0; c0 * (g.is_directed() as usize)],
                out_size: 0,
                ins_size: 0,
                adjacency_matrix: g.adjacency_matrix(),
                generation: 0,
            }
        }
        /// Return **true** if we have a complete mapping
        pub fn is_complete(&self) -> bool {
            self.generation == self.mapping.len()
        }
        /// Add mapping **from** <-> **to** to the state.
        pub fn push_mapping(&mut self, from: G::NodeId, to: usize) {
            self.generation += 1;
            self.mapping[self.graph.to_index(from)] = to;
            // update T0 & T1 ins/outs
            // T0out: Node in G0 not in M0 but successor of a node in M0.
            // st.out[0]: Node either in M0 or successor of M0
            for ix in self.graph.neighbors_directed(from, Outgoing) {
                if self.out[self.graph.to_index(ix)] == 0 {
                    self.out[self.graph.to_index(ix)] = self.generation;
                    self.out_size += 1;
                }
            }
            if self.graph.is_directed() {
                for ix in self.graph.neighbors_directed(from, Incoming) {
                    if self.ins[self.graph.to_index(ix)] == 0 {
                        self.ins[self.graph.to_index(ix)] = self.generation;
                        self.ins_size += 1;
                    }
                }
            }
        }
        /// Restore the state to before the last added mapping
        pub fn pop_mapping(&mut self, from: G::NodeId) {
            // undo (n, m) mapping
            self.mapping[self.graph.to_index(from)] = std::usize::MAX;
            // unmark in ins and outs
            // (only marks stamped with the current generation are undone)
            for ix in self.graph.neighbors_directed(from, Outgoing) {
                if self.out[self.graph.to_index(ix)] == self.generation {
                    self.out[self.graph.to_index(ix)] = 0;
                    self.out_size -= 1;
                }
            }
            if self.graph.is_directed() {
                for ix in self.graph.neighbors_directed(from, Incoming) {
                    if self.ins[self.graph.to_index(ix)] == self.generation {
                        self.ins[self.graph.to_index(ix)] = 0;
                        self.ins_size -= 1;
                    }
                }
            }
            self.generation -= 1;
        }
        /// Find the next (least) node in the Tout set.
        pub fn next_out_index(&self, from_index: usize) -> Option<usize> {
            self.out[from_index..]
                .iter()
                .enumerate()
                .find(move |&(index, &elt)| {
                    elt > 0 && self.mapping[from_index + index] == std::usize::MAX
                })
                .map(|(index, _)| index)
        }
        /// Find the next (least) node in the Tin set.
        pub fn next_in_index(&self, from_index: usize) -> Option<usize> {
            if !self.graph.is_directed() {
                return None;
            }
            self.ins[from_index..]
                .iter()
                .enumerate()
                .find(move |&(index, &elt)| {
                    elt > 0 && self.mapping[from_index + index] == std::usize::MAX
                })
                .map(|(index, _)| index)
        }
        /// Find the next (least) node in the N - M set.
        pub fn next_rest_index(&self, from_index: usize) -> Option<usize> {
            self.mapping[from_index..]
                .iter()
                .enumerate()
                .find(|&(_, &elt)| elt == std::usize::MAX)
                .map(|(index, _)| index)
        }
    }
}
mod semantic {
    use super::*;
    /// Marker type: no user-supplied semantic matcher (everything matches).
    pub struct NoSemanticMatch;
    /// Semantic node comparison hook used during (sub)graph matching.
    pub trait NodeMatcher<G0: GraphBase, G1: GraphBase> {
        // Whether this matcher compares anything at all; lets the algorithm
        // skip the call entirely for `NoSemanticMatch`.
        fn enabled() -> bool;
        fn eq(&mut self, _g0: &G0, _g1: &G1, _n0: G0::NodeId, _n1: G1::NodeId) -> bool;
    }
    impl<G0: GraphBase, G1: GraphBase> NodeMatcher<G0, G1> for NoSemanticMatch {
        #[inline]
        fn enabled() -> bool {
            false
        }
        #[inline]
        fn eq(&mut self, _g0: &G0, _g1: &G1, _n0: G0::NodeId, _n1: G1::NodeId) -> bool {
            true
        }
    }
    // Any closure over the two node weights acts as a node matcher; nodes
    // with a missing weight compare unequal.
    impl<G0, G1, F> NodeMatcher<G0, G1> for F
    where
        G0: GraphBase + DataMap,
        G1: GraphBase + DataMap,
        F: FnMut(&G0::NodeWeight, &G1::NodeWeight) -> bool,
    {
        #[inline]
        fn enabled() -> bool {
            true
        }
        #[inline]
        fn eq(&mut self, g0: &G0, g1: &G1, n0: G0::NodeId, n1: G1::NodeId) -> bool {
            if let (Some(x), Some(y)) = (g0.node_weight(n0), g1.node_weight(n1)) {
                self(x, y)
            } else {
                false
            }
        }
    }
    /// Semantic edge comparison hook; edges are given as (source, target)
    /// node pairs.
    pub trait EdgeMatcher<G0: GraphBase, G1: GraphBase> {
        fn enabled() -> bool;
        fn eq(
            &mut self,
            _g0: &G0,
            _g1: &G1,
            e0: (G0::NodeId, G0::NodeId),
            e1: (G1::NodeId, G1::NodeId),
        ) -> bool;
    }
    impl<G0: GraphBase, G1: GraphBase> EdgeMatcher<G0, G1> for NoSemanticMatch {
        #[inline]
        fn enabled() -> bool {
            false
        }
        #[inline]
        fn eq(
            &mut self,
            _g0: &G0,
            _g1: &G1,
            _e0: (G0::NodeId, G0::NodeId),
            _e1: (G1::NodeId, G1::NodeId),
        ) -> bool {
            true
        }
    }
    // Any closure over the two edge weights acts as an edge matcher. The
    // weight is resolved by scanning the source node's outgoing edges for
    // the given target; edges with a missing weight compare unequal.
    impl<G0, G1, F> EdgeMatcher<G0, G1> for F
    where
        G0: GraphBase + DataMap + IntoEdgesDirected,
        G1: GraphBase + DataMap + IntoEdgesDirected,
        F: FnMut(&G0::EdgeWeight, &G1::EdgeWeight) -> bool,
    {
        #[inline]
        fn enabled() -> bool {
            true
        }
        #[inline]
        fn eq(
            &mut self,
            g0: &G0,
            g1: &G1,
            e0: (G0::NodeId, G0::NodeId),
            e1: (G1::NodeId, G1::NodeId),
        ) -> bool {
            let w0 = g0
                .edges_directed(e0.0, Outgoing)
                .find(|edge| edge.target() == e0.1)
                .and_then(|edge| g0.edge_weight(edge.id()));
            let w1 = g1
                .edges_directed(e1.0, Outgoing)
                .find(|edge| edge.target() == e1.1)
                .and_then(|edge| g1.edge_weight(edge.id()));
            if let (Some(x), Some(y)) = (w0, w1) {
                self(x, y)
            } else {
                false
            }
        }
    }
}
mod matching {
use super::*;
#[derive(Copy, Clone, PartialEq, Debug)]
enum OpenList {
Out,
In,
Other,
}
#[derive(Clone, PartialEq, Debug)]
enum Frame<G0, G1>
where
G0: GraphBase,
G1: GraphBase,
{
Outer,
Inner {
nodes: (G0::NodeId, G1::NodeId),
open_list: OpenList,
},
Unwind {
nodes: (G0::NodeId, G1::NodeId),
open_list: OpenList,
},
}
    /// Check whether extending the current partial mapping with the pair
    /// `nodes = (n, m)` keeps it consistent (VF2 feasibility rules).
    ///
    /// Performs the syntactic R_succ/R_pred checks (every already-mapped
    /// neighbor of `n` must correspond to a neighbor of `m`, and vice versa),
    /// then the semantic node/edge checks when the matchers are enabled.
    /// Returns `true` if the pair may be added to the mapping.
    fn is_feasible<G0, G1, NM, EM>(
        st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
        nodes: (G0::NodeId, G1::NodeId),
        node_match: &mut NM,
        edge_match: &mut EM,
    ) -> bool
    where
        G0: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
        G1: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
        NM: NodeMatcher<G0, G1>,
        EM: EdgeMatcher<G0, G1>,
    {
        // Select tuple element `$j`; `1 - $j` selects the *other* element, so
        // each macro below can be instantiated for either direction.
        macro_rules! field {
            ($x:ident, 0) => {
                $x.0
            };
            ($x:ident, 1) => {
                $x.1
            };
            ($x:ident, 1 - 0) => {
                $x.1
            };
            ($x:ident, 1 - 1) => {
                $x.0
            };
        }
        // R_succ check for side $j: every mapped successor of nodes.$j must be
        // adjacent (via the other graph) to nodes.(1-$j). Evaluates to the
        // successor count; returns `false` from `is_feasible` on violation.
        macro_rules! r_succ {
            ($j:tt) => {{
                let mut succ_count = 0;
                for n_neigh in field!(st, $j)
                    .graph
                    .neighbors_directed(field!(nodes, $j), Outgoing)
                {
                    succ_count += 1;
                    // handle the self loop case; it's not in the mapping (yet)
                    let m_neigh = if field!(nodes, $j) != n_neigh {
                        field!(st, $j).mapping[field!(st, $j).graph.to_index(n_neigh)]
                    } else {
                        field!(st, 1 - $j).graph.to_index(field!(nodes, 1 - $j))
                    };
                    // usize::MAX marks "not mapped yet" — skip unmapped neighbors.
                    if m_neigh == std::usize::MAX {
                        continue;
                    }
                    let has_edge = field!(st, 1 - $j).graph.is_adjacent(
                        &field!(st, 1 - $j).adjacency_matrix,
                        field!(nodes, 1 - $j),
                        field!(st, 1 - $j).graph.from_index(m_neigh),
                    );
                    if !has_edge {
                        return false;
                    }
                }
                succ_count
            }};
        }
        // R_pred check for side $j: the mirror of r_succ over incoming edges.
        macro_rules! r_pred {
            ($j:tt) => {{
                let mut pred_count = 0;
                for n_neigh in field!(st, $j)
                    .graph
                    .neighbors_directed(field!(nodes, $j), Incoming)
                {
                    pred_count += 1;
                    // the self loop case is handled in outgoing
                    let m_neigh = field!(st, $j).mapping[field!(st, $j).graph.to_index(n_neigh)];
                    if m_neigh == std::usize::MAX {
                        continue;
                    }
                    let has_edge = field!(st, 1 - $j).graph.is_adjacent(
                        &field!(st, 1 - $j).adjacency_matrix,
                        field!(st, 1 - $j).graph.from_index(m_neigh),
                        field!(nodes, 1 - $j),
                    );
                    if !has_edge {
                        return false;
                    }
                }
                pred_count
            }};
        }
        // Check syntactic feasibility of mapping by ensuring adjacencies
        // of nx map to adjacencies of mx.
        //
        // nx == map to => mx
        //
        // R_succ
        //
        // Check that every neighbor of nx is mapped to a neighbor of mx,
        // then check the reverse, from mx to nx. Check that they have the same
        // count of edges.
        //
        // Note: We want to check the lookahead measures here if we can,
        // R_out: Equal for G0, G1: Card(Succ(G, n) ^ Tout); for both Succ and Pred
        // R_in: Same with Tin
        // R_new: Equal for G0, G1: Ñ n Pred(G, n); both Succ and Pred,
        //      Ñ is G0 - M - Tin - Tout
        // last attempt to add these did not speed up any of the testcases
        if r_succ!(0) > r_succ!(1) {
            return false;
        }
        // R_pred
        if st.0.graph.is_directed() && r_pred!(0) > r_pred!(1) {
            return false;
        }
        // semantic feasibility: compare associated data for nodes
        if NM::enabled() && !node_match.eq(st.0.graph, st.1.graph, nodes.0, nodes.1) {
            return false;
        }
        // semantic feasibility: compare associated data for edges
        if EM::enabled() {
            // For side $j: every edge between nodes.$j and a mapped neighbor
            // must semantically match the corresponding edge on the other side.
            macro_rules! edge_feasibility {
                ($j:tt) => {{
                    for n_neigh in field!(st, $j)
                        .graph
                        .neighbors_directed(field!(nodes, $j), Outgoing)
                    {
                        let m_neigh = if field!(nodes, $j) != n_neigh {
                            field!(st, $j).mapping[field!(st, $j).graph.to_index(n_neigh)]
                        } else {
                            field!(st, 1 - $j).graph.to_index(field!(nodes, 1 - $j))
                        };
                        if m_neigh == std::usize::MAX {
                            continue;
                        }
                        let e0 = (field!(nodes, $j), n_neigh);
                        let e1 = (
                            field!(nodes, 1 - $j),
                            field!(st, 1 - $j).graph.from_index(m_neigh),
                        );
                        let edges = (e0, e1);
                        if !edge_match.eq(
                            st.0.graph,
                            st.1.graph,
                            field!(edges, $j),
                            field!(edges, 1 - $j),
                        ) {
                            return false;
                        }
                    }
                    if field!(st, $j).graph.is_directed() {
                        for n_neigh in field!(st, $j)
                            .graph
                            .neighbors_directed(field!(nodes, $j), Incoming)
                        {
                            // the self loop case is handled in outgoing
                            let m_neigh =
                                field!(st, $j).mapping[field!(st, $j).graph.to_index(n_neigh)];
                            if m_neigh == std::usize::MAX {
                                continue;
                            }
                            let e0 = (n_neigh, field!(nodes, $j));
                            let e1 = (
                                field!(st, 1 - $j).graph.from_index(m_neigh),
                                field!(nodes, 1 - $j),
                            );
                            let edges = (e0, e1);
                            if !edge_match.eq(
                                st.0.graph,
                                st.1.graph,
                                field!(edges, $j),
                                field!(edges, 1 - $j),
                            ) {
                                return false;
                            }
                        }
                    }
                }};
            }
            edge_feasibility!(0);
            edge_feasibility!(1);
        }
        true
    }
fn next_candidate<G0, G1>(
st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
) -> Option<(G0::NodeId, G1::NodeId, OpenList)>
where
G0: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
G1: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
{
let mut from_index = None;
let mut open_list = OpenList::Out;
let mut to_index = st.1.next_out_index(0);
// Try the out list
if to_index.is_some() {
from_index = st.0.next_out_index(0);
open_list = OpenList::Out;
}
// Try the in list
if to_index.is_none() || from_index.is_none() {
to_index = st.1.next_in_index(0);
if to_index.is_some() {
from_index = st.0.next_in_index(0);
open_list = OpenList::In;
}
}
// Try the other list -- disconnected graph
if to_index.is_none() || from_index.is_none() {
to_index = st.1.next_rest_index(0);
if to_index.is_some() {
from_index = st.0.next_rest_index(0);
open_list = OpenList::Other;
}
}
match (from_index, to_index) {
(Some(n), Some(m)) => Some((
st.0.graph.from_index(n),
st.1.graph.from_index(m),
open_list,
)),
// No more candidates
_ => None,
}
}
fn next_from_ix<G0, G1>(
st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
nx: G0::NodeId,
open_list: OpenList,
) -> Option<G0::NodeId>
where
G0: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
G1: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
{
// Find the next node index to try on the `from` side of the mapping
let start = st.0.graph.to_index(nx) + 1;
let cand0 = match open_list {
OpenList::Out => st.0.next_out_index(start),
OpenList::In => st.0.next_in_index(start),
OpenList::Other => st.0.next_rest_index(start),
}
.map(|c| c + start); // compensate for start offset.
match cand0 {
None => None, // no more candidates
Some(ix) => {
debug_assert!(ix >= start);
Some(st.0.graph.from_index(ix))
}
}
}
fn pop_state<G0, G1>(
st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
nodes: (G0::NodeId, G1::NodeId),
) where
G0: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
G1: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
{
st.0.pop_mapping(nodes.0);
st.1.pop_mapping(nodes.1);
}
fn push_state<G0, G1>(
st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
nodes: (G0::NodeId, G1::NodeId),
) where
G0: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
G1: GetAdjacencyMatrix + GraphProp + NodeCompactIndexable + IntoNeighborsDirected,
{
st.0.push_mapping(nodes.0, st.1.graph.to_index(nodes.1));
st.1.push_mapping(nodes.1, st.0.graph.to_index(nodes.0));
}
    /// Return Some(bool) if isomorphism is decided, else None.
    ///
    /// Runs the VF2 search with an explicit stack of [`Frame`]s instead of
    /// recursion. Returns `Some(true)` as soon as a complete mapping is found;
    /// returns `None` when the search space is exhausted without one (callers
    /// treat `None` as "no match"). With `match_subgraph`, `G0` only needs to
    /// map onto a node-induced subgraph of `G1`.
    pub fn try_match<G0, G1, NM, EM>(
        mut st: &mut (Vf2State<'_, G0>, Vf2State<'_, G1>),
        node_match: &mut NM,
        edge_match: &mut EM,
        match_subgraph: bool,
    ) -> Option<bool>
    where
        G0: NodeCompactIndexable
            + EdgeCount
            + GetAdjacencyMatrix
            + GraphProp
            + IntoNeighborsDirected,
        G1: NodeCompactIndexable
            + EdgeCount
            + GetAdjacencyMatrix
            + GraphProp
            + IntoNeighborsDirected,
        NM: NodeMatcher<G0, G1>,
        EM: EdgeMatcher<G0, G1>,
    {
        if st.0.is_complete() {
            return Some(true);
        }
        // A "depth first" search of a valid mapping from graph 1 to graph 2
        // F(s, n, m) -- evaluate state s and add mapping n <-> m
        // Find least T1out node (in st.out[1] but not in M[1])
        let mut stack: Vec<Frame<G0, G1>> = vec![Frame::Outer];
        while let Some(frame) = stack.pop() {
            match frame {
                // Backtrack: undo this pair, then retry with the next G0 node.
                Frame::Unwind { nodes, open_list } => {
                    pop_state(&mut st, nodes);
                    match next_from_ix(&mut st, nodes.0, open_list) {
                        None => continue,
                        Some(nx) => {
                            let f = Frame::Inner {
                                nodes: (nx, nodes.1),
                                open_list,
                            };
                            stack.push(f);
                        }
                    }
                }
                // Start a new level: pick a fresh candidate pair.
                Frame::Outer => match next_candidate(&mut st) {
                    None => continue,
                    Some((nx, mx, open_list)) => {
                        let f = Frame::Inner {
                            nodes: (nx, mx),
                            open_list,
                        };
                        stack.push(f);
                    }
                },
                // Try to extend the mapping with this pair.
                Frame::Inner { nodes, open_list } => {
                    if is_feasible(&mut st, nodes, node_match, edge_match) {
                        push_state(&mut st, nodes);
                        if st.0.is_complete() {
                            return Some(true);
                        }
                        // Check cardinalities of Tin, Tout sets
                        if (!match_subgraph
                            && st.0.out_size == st.1.out_size
                            && st.0.ins_size == st.1.ins_size)
                            || (match_subgraph
                                && st.0.out_size <= st.1.out_size
                                && st.0.ins_size <= st.1.ins_size)
                        {
                            // Descend: schedule the unwind for backtracking,
                            // then explore deeper via a fresh Outer frame.
                            let f0 = Frame::Unwind { nodes, open_list };
                            stack.push(f0);
                            stack.push(Frame::Outer);
                            continue;
                        }
                        pop_state(&mut st, nodes);
                    }
                    // Infeasible (or pruned): try the next G0 candidate for
                    // the same G1 node.
                    match next_from_ix(&mut st, nodes.0, open_list) {
                        None => continue,
                        Some(nx) => {
                            let f = Frame::Inner {
                                nodes: (nx, nodes.1),
                                open_list,
                            };
                            stack.push(f);
                        }
                    }
                }
            }
        }
        None
    }
}
/// \[Generic\] Return `true` if the graphs `g0` and `g1` are isomorphic.
///
/// Using the VF2 algorithm, only matching graph syntactically (graph
/// structure).
///
/// The graphs should not be multigraphs.
///
/// **Reference**
///
/// * Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento;
/// *A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs*
pub fn is_isomorphic<G0, G1>(g0: G0, g1: G1) -> bool
where
G0: NodeCompactIndexable + EdgeCount + GetAdjacencyMatrix + GraphProp + IntoNeighborsDirected,
G1: NodeCompactIndexable
+ EdgeCount
+ GetAdjacencyMatrix
+ GraphProp<EdgeType = G0::EdgeType>
+ IntoNeighborsDirected,
{
if g0.node_count() != g1.node_count() || g0.edge_count() != g1.edge_count() {
return false;
}
let mut st = (Vf2State::new(&g0), Vf2State::new(&g1));
self::matching::try_match(&mut st, &mut NoSemanticMatch, &mut NoSemanticMatch, false)
.unwrap_or(false)
}
/// \[Generic\] Return `true` if the graphs `g0` and `g1` are isomorphic.
///
/// Using the VF2 algorithm, examining both syntactic and semantic
/// graph isomorphism (graph structure and matching node and edge weights).
///
/// The graphs should not be multigraphs.
pub fn is_isomorphic_matching<G0, G1, NM, EM>(
g0: G0,
g1: G1,
mut node_match: NM,
mut edge_match: EM,
) -> bool
where
G0: NodeCompactIndexable
+ EdgeCount
+ DataMap
+ GetAdjacencyMatrix
+ GraphProp
+ IntoEdgesDirected,
G1: NodeCompactIndexable
+ EdgeCount
+ DataMap
+ GetAdjacencyMatrix
+ GraphProp<EdgeType = G0::EdgeType>
+ IntoEdgesDirected,
NM: FnMut(&G0::NodeWeight, &G1::NodeWeight) -> bool,
EM: FnMut(&G0::EdgeWeight, &G1::EdgeWeight) -> bool,
{
if g0.node_count() != g1.node_count() || g0.edge_count() != g1.edge_count() {
return false;
}
let mut st = (Vf2State::new(&g0), Vf2State::new(&g1));
self::matching::try_match(&mut st, &mut node_match, &mut edge_match, false).unwrap_or(false)
}
/// \[Generic\] Return `true` if `g0` is isomorphic to a subgraph of `g1`.
///
/// Using the VF2 algorithm, only matching graph syntactically (graph
/// structure).
///
/// The graphs should not be multigraphs.
///
/// # Subgraph isomorphism
///
/// (adapted from [`networkx` documentation](https://networkx.github.io/documentation/stable/reference/algorithms/isomorphism.vf2.html))
///
/// Graph theory literature can be ambiguous about the meaning of the above statement,
/// and we seek to clarify it now.
///
/// In the VF2 literature, a mapping **M** is said to be a *graph-subgraph isomorphism*
/// iff **M** is an isomorphism between **G2** and a subgraph of **G1**. Thus, to say
/// that **G1** and **G2** are graph-subgraph isomorphic is to say that a subgraph of
/// **G1** is isomorphic to **G2**.
///
/// Other literature uses the phrase ‘subgraph isomorphic’ as in
/// ‘**G1** does not have a subgraph isomorphic to **G2**’. Another use is as an in adverb
/// for isomorphic. Thus, to say that **G1** and **G2** are subgraph isomorphic is to say
/// that a subgraph of **G1** is isomorphic to **G2**.
///
/// Finally, the term ‘subgraph’ can have multiple meanings. In this context,
/// ‘subgraph’ always means a ‘node-induced subgraph’. Edge-induced subgraph
/// isomorphisms are not directly supported. For subgraphs which are not
/// induced, the term ‘monomorphism’ is preferred over ‘isomorphism’.
///
/// **Reference**
///
/// * Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento;
/// *A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs*
pub fn is_isomorphic_subgraph<G0, G1>(g0: G0, g1: G1) -> bool
where
G0: NodeCompactIndexable + EdgeCount + GetAdjacencyMatrix + GraphProp + IntoNeighborsDirected,
G1: NodeCompactIndexable
+ EdgeCount
+ GetAdjacencyMatrix
+ GraphProp<EdgeType = G0::EdgeType>
+ IntoNeighborsDirected,
{
if g0.node_count() > g1.node_count() || g0.edge_count() > g1.edge_count() {
return false;
}
let mut st = (Vf2State::new(&g0), Vf2State::new(&g1));
self::matching::try_match(&mut st, &mut NoSemanticMatch, &mut NoSemanticMatch, true)
.unwrap_or(false)
}
/// \[Generic\] Return `true` if `g0` is isomorphic to a subgraph of `g1`.
///
/// Using the VF2 algorithm, examining both syntactic and semantic
/// graph isomorphism (graph structure and matching node and edge weights).
///
/// The graphs should not be multigraphs.
pub fn is_isomorphic_subgraph_matching<G0, G1, NM, EM>(
g0: G0,
g1: G1,
mut node_match: NM,
mut edge_match: EM,
) -> bool
where
G0: NodeCompactIndexable
+ EdgeCount
+ DataMap
+ GetAdjacencyMatrix
+ GraphProp
+ IntoEdgesDirected,
G1: NodeCompactIndexable
+ EdgeCount
+ DataMap
+ GetAdjacencyMatrix
+ GraphProp<EdgeType = G0::EdgeType>
+ IntoEdgesDirected,
NM: FnMut(&G0::NodeWeight, &G1::NodeWeight) -> bool,
EM: FnMut(&G0::EdgeWeight, &G1::EdgeWeight) -> bool,
{
if g0.node_count() > g1.node_count() || g0.edge_count() > g1.edge_count() {
return false;
}
let mut st = (Vf2State::new(&g0), Vf2State::new(&g1));
self::matching::try_match(&mut st, &mut node_match, &mut edge_match, true).unwrap_or(false)
}
| 35.293601 | 136 | 0.47979 |
281bedde7452b72d8fe2a5ac5c7c4d359e371d2e | 19,406 | // Copyright 2018 Developers of the Rand project.
// Copyright 2017-2018 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Random number generation traits
//!
//! This crate is mainly of interest to crates publishing implementations of
//! [`RngCore`]. Other users are encouraged to use the [`rand`] crate instead
//! which re-exports the main traits and error types.
//!
//! [`RngCore`] is the core trait implemented by algorithmic pseudo-random number
//! generators and external random-number sources.
//!
//! [`SeedableRng`] is an extension trait for construction from fixed seeds and
//! other random number generators.
//!
//! [`Error`] is provided for error-handling. It is safe to use in `no_std`
//! environments.
//!
//! The [`impls`] and [`le`] sub-modules include a few small functions to assist
//! implementation of [`RngCore`].
//!
//! [`rand`]: https://docs.rs/rand
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://rust-random.github.io/rand/"
)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![allow(clippy::unreadable_literal)]
#![cfg_attr(not(feature = "std"), no_std)]
use core::convert::AsMut;
use core::default::Default;
use core::ptr::copy_nonoverlapping;
#[cfg(all(feature = "alloc", not(feature = "std")))] extern crate alloc;
#[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::boxed::Box;
pub use error::Error;
#[cfg(feature = "getrandom")] pub use os::OsRng;
pub mod block;
mod error;
pub mod impls;
pub mod le;
#[cfg(feature = "getrandom")] mod os;
/// The core of a random number generator.
///
/// This trait encapsulates the low-level functionality common to all
/// generators, and is the "back end", to be implemented by generators.
/// End users should normally use the `Rng` trait from the [`rand`] crate,
/// which is automatically implemented for every type implementing `RngCore`.
///
/// Three different methods for generating random data are provided since the
/// optimal implementation of each is dependent on the type of generator. There
/// is no required relationship between the output of each; e.g. many
/// implementations of [`fill_bytes`] consume a whole number of `u32` or `u64`
/// values and drop any remaining unused bytes. The same can happen with the
/// [`next_u32`] and [`next_u64`] methods, implementations may discard some
/// random bits for efficiency.
///
/// The [`try_fill_bytes`] method is a variant of [`fill_bytes`] allowing error
/// handling; it is not deemed sufficiently useful to add equivalents for
/// [`next_u32`] or [`next_u64`] since the latter methods are almost always used
/// with algorithmic generators (PRNGs), which are normally infallible.
///
/// Algorithmic generators implementing [`SeedableRng`] should normally have
/// *portable, reproducible* output, i.e. fix Endianness when converting values
/// to avoid platform differences, and avoid making any changes which affect
/// output (except by communicating that the release has breaking changes).
///
/// Typically implementators will implement only one of the methods available
/// in this trait directly, then use the helper functions from the
/// [`impls`] module to implement the other methods.
///
/// It is recommended that implementations also implement:
///
/// - `Debug` with a custom implementation which *does not* print any internal
/// state (at least, [`CryptoRng`]s should not risk leaking state through
/// `Debug`).
/// - `Serialize` and `Deserialize` (from Serde), preferably making Serde
/// support optional at the crate level in PRNG libs.
/// - `Clone`, if possible.
/// - *never* implement `Copy` (accidental copies may cause repeated values).
/// - *do not* implement `Default` for pseudorandom generators, but instead
/// implement [`SeedableRng`], to guide users towards proper seeding.
/// External / hardware RNGs can choose to implement `Default`.
/// - `Eq` and `PartialEq` could be implemented, but are probably not useful.
///
/// # Example
///
/// A simple example, obviously not generating very *random* output:
///
/// ```
/// #![allow(dead_code)]
/// use rand_core::{RngCore, Error, impls};
///
/// struct CountingRng(u64);
///
/// impl RngCore for CountingRng {
/// fn next_u32(&mut self) -> u32 {
/// self.next_u64() as u32
/// }
///
/// fn next_u64(&mut self) -> u64 {
/// self.0 += 1;
/// self.0
/// }
///
/// fn fill_bytes(&mut self, dest: &mut [u8]) {
/// impls::fill_bytes_via_next(self, dest)
/// }
///
/// fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
/// Ok(self.fill_bytes(dest))
/// }
/// }
/// ```
///
/// [`rand`]: https://docs.rs/rand
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`next_u32`]: RngCore::next_u32
/// [`next_u64`]: RngCore::next_u64
pub trait RngCore {
    /// Return the next random `u32`.
    ///
    /// RNGs must implement at least one method from this trait directly. In
    /// the case this method is not implemented directly, it can be implemented
    /// using `self.next_u64() as u32` or via [`impls::next_u32_via_fill`].
    fn next_u32(&mut self) -> u32;
    /// Return the next random `u64`.
    ///
    /// RNGs must implement at least one method from this trait directly. In
    /// the case this method is not implemented directly, it can be implemented
    /// via [`impls::next_u64_via_u32`] or via [`impls::next_u64_via_fill`].
    fn next_u64(&mut self) -> u64;
    /// Fill `dest` with random data.
    ///
    /// RNGs must implement at least one method from this trait directly. In
    /// the case this method is not implemented directly, it can be implemented
    /// via [`impls::fill_bytes_via_next`] or
    /// via [`RngCore::try_fill_bytes`]; if this generator can
    /// fail the implementation must choose how best to handle errors here
    /// (e.g. panic with a descriptive message or log a warning and retry a few
    /// times).
    ///
    /// This method should guarantee that `dest` is entirely filled
    /// with new data, and may panic if this is impossible
    /// (e.g. reading past the end of a file that is being used as the
    /// source of randomness).
    fn fill_bytes(&mut self, dest: &mut [u8]);
    /// Fill `dest` entirely with random data.
    ///
    /// This is the only method which allows an RNG to report errors while
    /// generating random data thus making this the primary method implemented
    /// by external (true) RNGs (e.g. `OsRng`) which can fail. It may be used
    /// directly to generate keys and to seed (infallible) PRNGs.
    ///
    /// Other than error handling, this method is identical to [`RngCore::fill_bytes`];
    /// thus this may be implemented using `Ok(self.fill_bytes(dest))` or
    /// `fill_bytes` may be implemented with
    /// `self.try_fill_bytes(dest).unwrap()` or more specific error handling.
    /// On success `dest` is entirely filled, exactly as with [`RngCore::fill_bytes`].
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error>;
}
/// A marker trait used to indicate that an [`RngCore`] or [`BlockRngCore`]
/// implementation is supposed to be cryptographically secure.
///
/// *Cryptographically secure generators*, also known as *CSPRNGs*, should
/// satisfy an additional properties over other generators: given the first
/// *k* bits of an algorithm's output
/// sequence, it should not be possible using polynomial-time algorithms to
/// predict the next bit with probability significantly greater than 50%.
///
/// Some generators may satisfy an additional property, however this is not
/// required by this trait: if the CSPRNG's state is revealed, it should not be
/// computationally-feasible to reconstruct output prior to this. Some other
/// generators allow backwards-computation and are consided *reversible*.
///
/// Note that this trait is provided for guidance only and cannot guarantee
/// suitability for cryptographic applications. In general it should only be
/// implemented for well-reviewed code implementing well-regarded algorithms.
///
/// Note also that use of a `CryptoRng` does not protect against other
/// weaknesses such as seeding from a weak entropy source or leaking state.
///
/// This is a pure marker trait: it declares no methods of its own and is used
/// only as an additional bound, e.g. `R: RngCore + CryptoRng`.
///
/// [`BlockRngCore`]: block::BlockRngCore
pub trait CryptoRng {}
/// A random number generator that can be explicitly seeded.
///
/// This trait encapsulates the low-level functionality common to all
/// pseudo-random number generators (PRNGs, or algorithmic generators).
///
/// [`rand`]: https://docs.rs/rand
pub trait SeedableRng: Sized {
/// Seed type, which is restricted to types mutably-dereferencable as `u8`
/// arrays (we recommend `[u8; N]` for some `N`).
///
/// It is recommended to seed PRNGs with a seed of at least circa 100 bits,
/// which means an array of `[u8; 12]` or greater to avoid picking RNGs with
/// partially overlapping periods.
///
/// For cryptographic RNG's a seed of 256 bits is recommended, `[u8; 32]`.
///
///
/// # Implementing `SeedableRng` for RNGs with large seeds
///
/// Note that the required traits `core::default::Default` and
/// `core::convert::AsMut<u8>` are not implemented for large arrays
/// `[u8; N]` with `N` > 32. To be able to implement the traits required by
/// `SeedableRng` for RNGs with such large seeds, the newtype pattern can be
/// used:
///
/// ```
/// use rand_core::SeedableRng;
///
/// const N: usize = 64;
/// pub struct MyRngSeed(pub [u8; N]);
/// pub struct MyRng(MyRngSeed);
///
/// impl Default for MyRngSeed {
/// fn default() -> MyRngSeed {
/// MyRngSeed([0; N])
/// }
/// }
///
/// impl AsMut<[u8]> for MyRngSeed {
/// fn as_mut(&mut self) -> &mut [u8] {
/// &mut self.0
/// }
/// }
///
/// impl SeedableRng for MyRng {
/// type Seed = MyRngSeed;
///
/// fn from_seed(seed: MyRngSeed) -> MyRng {
/// MyRng(seed)
/// }
/// }
/// ```
type Seed: Sized + Default + AsMut<[u8]>;
/// Create a new PRNG using the given seed.
///
/// PRNG implementations are allowed to assume that bits in the seed are
/// well distributed. That means usually that the number of one and zero
/// bits are roughly equal, and values like 0, 1 and (size - 1) are unlikely.
/// Note that many non-cryptographic PRNGs will show poor quality output
/// if this is not adhered to. If you wish to seed from simple numbers, use
/// `seed_from_u64` instead.
///
/// All PRNG implementations should be reproducible unless otherwise noted:
/// given a fixed `seed`, the same sequence of output should be produced
/// on all runs, library versions and architectures (e.g. check endianness).
/// Any "value-breaking" changes to the generator should require bumping at
/// least the minor version and documentation of the change.
///
/// It is not required that this function yield the same state as a
/// reference implementation of the PRNG given equivalent seed; if necessary
/// another constructor replicating behaviour from a reference
/// implementation can be added.
///
/// PRNG implementations should make sure `from_seed` never panics. In the
/// case that some special values (like an all zero seed) are not viable
/// seeds it is preferable to map these to alternative constant value(s),
/// for example `0xBAD5EEDu32` or `0x0DDB1A5E5BAD5EEDu64` ("odd biases? bad
/// seed"). This is assuming only a small number of values must be rejected.
fn from_seed(seed: Self::Seed) -> Self;
/// Create a new PRNG using a `u64` seed.
///
/// This is a convenience-wrapper around `from_seed` to allow construction
/// of any `SeedableRng` from a simple `u64` value. It is designed such that
/// low Hamming Weight numbers like 0 and 1 can be used and should still
/// result in good, independent seeds to the PRNG which is returned.
///
/// This **is not suitable for cryptography**, as should be clear given that
/// the input size is only 64 bits.
///
/// Implementations for PRNGs *may* provide their own implementations of
/// this function, but the default implementation should be good enough for
/// all purposes. *Changing* the implementation of this function should be
/// considered a value-breaking change.
fn seed_from_u64(mut state: u64) -> Self {
// We use PCG32 to generate a u32 sequence, and copy to the seed
const MUL: u64 = 6364136223846793005;
const INC: u64 = 11634580027462260723;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(4) {
// We advance the state first (to get away from the input value,
// in case it has low Hamming Weight).
state = state.wrapping_mul(MUL).wrapping_add(INC);
// Use PCG output function with to_le to generate x:
let xorshifted = (((state >> 18) ^ state) >> 27) as u32;
let rot = (state >> 59) as u32;
let x = xorshifted.rotate_right(rot).to_le();
unsafe {
let p = &x as *const u32 as *const u8;
copy_nonoverlapping(p, chunk.as_mut_ptr(), chunk.len());
}
}
Self::from_seed(seed)
}
/// Create a new PRNG seeded from another `Rng`.
///
/// This may be useful when needing to rapidly seed many PRNGs from a master
/// PRNG, and to allow forking of PRNGs. It may be considered deterministic.
///
/// The master PRNG should be at least as high quality as the child PRNGs.
/// When seeding non-cryptographic child PRNGs, we recommend using a
/// different algorithm for the master PRNG (ideally a CSPRNG) to avoid
/// correlations between the child PRNGs. If this is not possible (e.g.
/// forking using small non-crypto PRNGs) ensure that your PRNG has a good
/// mixing function on the output or consider use of a hash function with
/// `from_seed`.
///
/// Note that seeding `XorShiftRng` from another `XorShiftRng` provides an
/// extreme example of what can go wrong: the new PRNG will be a clone
/// of the parent.
///
/// PRNG implementations are allowed to assume that a good RNG is provided
/// for seeding, and that it is cryptographically secure when appropriate.
/// As of `rand` 0.7 / `rand_core` 0.5, implementations overriding this
/// method should ensure the implementation satisfies reproducibility
/// (in prior versions this was not required).
///
/// [`rand`]: https://docs.rs/rand
fn from_rng<R: RngCore>(mut rng: R) -> Result<Self, Error> {
let mut seed = Self::Seed::default();
rng.try_fill_bytes(seed.as_mut())?;
Ok(Self::from_seed(seed))
}
/// Creates a new instance of the RNG seeded via [`getrandom`].
///
/// This method is the recommended way to construct non-deterministic PRNGs
/// since it is convenient and secure.
///
/// In case the overhead of using [`getrandom`] to seed *many* PRNGs is an
/// issue, one may prefer to seed from a local PRNG, e.g.
/// `from_rng(thread_rng()).unwrap()`.
///
/// # Panics
///
/// If [`getrandom`] is unable to provide secure entropy this method will panic.
///
/// [`getrandom`]: https://docs.rs/getrandom
#[cfg(feature = "getrandom")]
fn from_entropy() -> Self {
let mut seed = Self::Seed::default();
if let Err(err) = getrandom::getrandom(seed.as_mut()) {
panic!("from_entropy failed: {}", err);
}
Self::from_seed(seed)
}
}
// Delegate `RngCore` through mutable references.
// Everything is force-inlined so inlining decisions are left entirely to the
// underlying implementation and the optimizer.
impl<'a, R: RngCore + ?Sized> RngCore for &'a mut R {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        let inner: &mut R = &mut **self;
        inner.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        let inner: &mut R = &mut **self;
        inner.next_u64()
    }
    #[inline(always)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let inner: &mut R = &mut **self;
        inner.fill_bytes(dest)
    }
    #[inline(always)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        let inner: &mut R = &mut **self;
        inner.try_fill_bytes(dest)
    }
}
// Delegate `RngCore` through `Box`.
// Everything is force-inlined so inlining decisions are left entirely to the
// underlying implementation and the optimizer.
#[cfg(feature = "alloc")]
impl<R: RngCore + ?Sized> RngCore for Box<R> {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        let inner: &mut R = &mut **self;
        inner.next_u32()
    }
    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        let inner: &mut R = &mut **self;
        inner.next_u64()
    }
    #[inline(always)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let inner: &mut R = &mut **self;
        inner.fill_bytes(dest)
    }
    #[inline(always)]
    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        let inner: &mut R = &mut **self;
        inner.try_fill_bytes(dest)
    }
}
#[cfg(feature = "std")]
impl std::io::Read for dyn RngCore {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
        // `try_fill_bytes` fills `buf` entirely on success, so a successful
        // call always counts as a full read; RNG errors convert to I/O errors.
        match self.try_fill_bytes(buf) {
            Ok(()) => Ok(buf.len()),
            Err(e) => Err(e.into()),
        }
    }
}
// Implement `CryptoRng` for references to an `CryptoRng`.
// (`CryptoRng` is a marker trait, so there are no methods to forward.)
impl<'a, R: CryptoRng + ?Sized> CryptoRng for &'a mut R {}
// Implement `CryptoRng` for boxed references to an `CryptoRng`.
#[cfg(feature = "alloc")]
impl<R: CryptoRng + ?Sized> CryptoRng for Box<R> {}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn test_seed_from_u64() {
        // Minimal SeedableRng whose "state" is just the 8-byte seed read back
        // as a little-endian u64, exposing seed_from_u64's raw output.
        struct SeedableNum(u64);
        impl SeedableRng for SeedableNum {
            type Seed = [u8; 8];
            fn from_seed(seed: Self::Seed) -> Self {
                let mut x = [0u64; 1];
                le::read_u64_into(&seed, &mut x);
                SeedableNum(x[0])
            }
        }
        const N: usize = 8;
        const SEEDS: [u64; N] = [0u64, 1, 2, 3, 4, 8, 16, -1i64 as u64];
        // Expand each input through seed_from_u64.
        let mut results = [0u64; N];
        for (slot, seed) in results.iter_mut().zip(SEEDS.iter()) {
            let SeedableNum(x) = SeedableNum::seed_from_u64(*seed);
            *slot = x;
        }
        for (i1, r1) in results.iter().enumerate() {
            // Popcount of a uniform u64 follows B(64, 0.5): the chance of
            // weight < 20 is binocdf(19, 64, 0.5) = 7.8e-4, same for > 44.
            let weight = r1.count_ones();
            assert!(weight >= 20 && weight <= 44);
            // Distinct inputs must yield well-separated outputs.
            for (i2, r2) in results.iter().enumerate() {
                if i1 != i2 {
                    let diff_weight = (r1 ^ r2).count_ones();
                    assert!(diff_weight >= 20);
                }
            }
        }
        // value-breakage test:
        assert_eq!(results[0], 5029875928683246316);
    }
}
| 39.443089 | 87 | 0.639802 |
abb63e5dc35b1716d668e5b7b71832393cd68924 | 85,321 | use crate::{
accounts::Accounts, accounts_index::Ancestors, instruction_recorder::InstructionRecorder,
log_collector::LogCollector, native_loader::NativeLoader, rent_collector::RentCollector,
};
use log::*;
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
account_utils::StateMut,
bpf_loader_upgradeable::{self, UpgradeableLoaderState},
feature_set::{
cpi_share_ro_and_exec_accounts, demote_sysvar_write_locks, instructions_sysvar_enabled,
updated_verify_policy, FeatureSet,
},
ic_msg,
instruction::{CompiledInstruction, Instruction, InstructionError, VoterGroup},
keyed_account::{create_keyed_readonly_accounts, KeyedAccount},
message::Message,
native_loader,
process_instruction::{
BpfComputeBudget, ComputeMeter, Executor, InvokeContext, Logger,
ProcessInstructionWithContext,
},
pubkey::Pubkey,
rent::Rent,
system_program,
sysvar::instructions,
transaction::TransactionError,
};
use std::{
cell::{Ref, RefCell},
collections::HashMap,
rc::Rc,
sync::Arc,
};
/// Cache of loaded program executors, keyed by program id.
pub struct Executors {
    // Program pubkey -> its loaded executor.
    pub executors: HashMap<Pubkey, Arc<dyn Executor>>,
    // Set to `true` by `insert`; presumably lets callers detect that the
    // cache changed and needs to be written back — confirm with call sites.
    pub is_dirty: bool,
}
impl Default for Executors {
fn default() -> Self {
Self {
executors: HashMap::default(),
is_dirty: false,
}
}
}
impl Executors {
    /// Store `executor` under `key`, replacing any previous entry, and mark
    /// the cache as modified.
    pub fn insert(&mut self, key: Pubkey, executor: Arc<dyn Executor>) {
        self.executors.insert(key, executor);
        self.is_dirty = true;
    }
    /// Look up the executor for `key`, if present, returning a new `Arc`
    /// handle to it (refcount bump only — no deep copy).
    pub fn get(&self, key: &Pubkey) -> Option<Arc<dyn Executor>> {
        self.executors.get(key).map(Arc::clone)
    }
}
/// Per-instruction execution timing and accounting totals.
/// The `*_us` suffix suggests microseconds — confirm against the code that
/// records these values.
#[derive(Default, Debug)]
pub struct ExecuteDetailsTimings {
    pub serialize_us: u64,
    pub create_vm_us: u64,
    pub execute_us: u64,
    pub deserialize_us: u64,
    pub changed_account_count: u64,
    pub total_account_count: u64,
    pub total_data_size: usize,
    pub data_size_changed: usize,
}
impl ExecuteDetailsTimings {
pub fn accumulate(&mut self, other: &ExecuteDetailsTimings) {
self.serialize_us += other.serialize_us;
self.create_vm_us += other.create_vm_us;
self.execute_us += other.execute_us;
self.deserialize_us += other.deserialize_us;
self.changed_account_count += other.changed_account_count;
self.total_account_count += other.total_account_count;
self.total_data_size += other.total_data_size;
self.data_size_changed += other.data_size_changed;
}
}
// The relevant state of an account before an Instruction executes, used
// to verify account integrity after the Instruction completes
#[derive(Clone, Debug, Default)]
pub struct PreAccount {
    // Address of the account being tracked
    key: Pubkey,
    // Snapshot of the account taken before execution (clone, not a live ref)
    account: Rc<RefCell<AccountSharedData>>,
    // True once `update` has been called, i.e. the snapshot was refreshed
    // after a verified write; used for change accounting in `verify`
    changed: bool,
}
impl PreAccount {
    /// Snapshot `account` (by clone) under its address `key`.
    pub fn new(key: &Pubkey, account: &AccountSharedData) -> Self {
        Self {
            key: *key,
            account: Rc::new(RefCell::new(account.clone())),
            changed: false,
        }
    }
    /// Enforce the runtime's account-mutation policy by comparing the
    /// pre-execution snapshot against the post-execution state `post`.
    ///
    /// Returns the first violated rule as an `InstructionError`; on success
    /// also updates `timings` change/size counters. The order of the checks
    /// below is part of the observable behavior (it decides which error is
    /// reported when several rules are violated at once).
    pub fn verify(
        &self,
        program_id: &Pubkey,
        is_writable: bool,
        rent: &Rent,
        post: &AccountSharedData,
        timings: &mut ExecuteDetailsTimings,
        updated_verify_policy: bool,
    ) -> Result<(), InstructionError> {
        let pre = self.account.borrow();
        // Only the owner of the account may change owner and
        // only if the account is writable and
        // only if the account is not executable and
        // only if the data is zero-initialized or empty
        let owner_changed = pre.owner != post.owner;
        if owner_changed
            && (!is_writable // line coverage used to get branch coverage
                || pre.executable
                || *program_id != pre.owner
            || !Self::is_zeroed(&post.data()))
        {
            return Err(InstructionError::ModifiedProgramId);
        }
        // An account not assigned to the program cannot have its balance decrease.
        if *program_id != pre.owner // line coverage used to get branch coverage
         && pre.lamports > post.lamports
        {
            return Err(InstructionError::ExternalAccountLamportSpend);
        }
        // The balance of read-only and executable accounts may not change
        let lamports_changed = pre.lamports != post.lamports;
        if lamports_changed {
            if !is_writable {
                return Err(InstructionError::ReadonlyLamportChange);
            }
            if pre.executable {
                return Err(InstructionError::ExecutableLamportChange);
            }
        }
        // Only the system program can change the size of the data
        //  and only if the system program owns the account
        let data_len_changed = pre.data().len() != post.data().len();
        if data_len_changed
            && (!system_program::check_id(program_id) // line coverage used to get branch coverage
                || !system_program::check_id(&pre.owner))
        {
            return Err(InstructionError::AccountDataSizeChanged);
        }
        // Only the owner may change account data
        //   and if the account is writable
        //   and if the account is not executable
        if !(*program_id == pre.owner
            && is_writable  // line coverage used to get branch coverage
            && !pre.executable)
            && pre.data() != post.data()
        {
            if pre.executable {
                return Err(InstructionError::ExecutableDataModified);
            } else if is_writable {
                return Err(InstructionError::ExternalAccountDataModified);
            } else {
                return Err(InstructionError::ReadonlyDataModified);
            }
        }
        // executable is one-way (false->true) and only the account owner may set it.
        let executable_changed = pre.executable != post.executable;
        if executable_changed {
            if !rent.is_exempt(post.lamports, post.data().len()) {
                return Err(InstructionError::ExecutableAccountNotRentExempt);
            }
            // Which side's owner is authoritative is feature-gated
            // (`updated_verify_policy` switches from pre- to post-owner).
            let owner = if updated_verify_policy {
                post.owner()
            } else {
                pre.owner()
            };
            if !is_writable // line coverage used to get branch coverage
                || pre.executable
                || program_id != owner
            {
                return Err(InstructionError::ExecutableModified);
            }
        }
        // No one modifies rent_epoch (yet).
        let rent_epoch_changed = pre.rent_epoch != post.rent_epoch;
        if rent_epoch_changed {
            return Err(InstructionError::RentEpochModified);
        }
        timings.total_account_count += 1;
        timings.total_data_size += post.data().len();
        if owner_changed
            || lamports_changed
            || data_len_changed
            || executable_changed
            || rent_epoch_changed
            || self.changed
        {
            timings.changed_account_count += 1;
            timings.data_size_changed += post.data().len();
        }
        Ok(())
    }
    /// Refresh the snapshot from `account` after a verified write and mark
    /// this entry as changed.
    pub fn update(&mut self, account: &AccountSharedData) {
        let mut pre = self.account.borrow_mut();
        pre.lamports = account.lamports;
        pre.owner = account.owner;
        pre.executable = account.executable;
        if pre.data().len() != account.data().len() {
            // Only system account can change data size, copy with alloc
            pre.set_data(account.data().clone());
        } else {
            // Copy without allocate
            pre.data_as_mut_slice().clone_from_slice(&account.data());
        }
        self.changed = true;
    }
    /// The address of the snapshotted account.
    pub fn key(&self) -> Pubkey {
        self.key
    }
    /// Lamport balance recorded in the snapshot.
    pub fn lamports(&self) -> u64 {
        self.account.borrow().lamports
    }
    /// Executable flag recorded in the snapshot.
    pub fn executable(&self) -> bool {
        self.account.borrow().executable
    }
    /// True if every byte of `buf` is zero; compares in 1 KiB chunks to
    /// avoid a byte-at-a-time loop.
    pub fn is_zeroed(buf: &[u8]) -> bool {
        const ZEROS_LEN: usize = 1024;
        static ZEROS: [u8; ZEROS_LEN] = [0; ZEROS_LEN];
        let mut chunks = buf.chunks_exact(ZEROS_LEN);
        chunks.all(|chunk| chunk == &ZEROS[..])
            && chunks.remainder() == &ZEROS[..chunks.remainder().len()]
    }
}
/// Tracks the compute units remaining for the current instruction.
pub struct ThisComputeMeter {
    remaining: u64,
}
impl ComputeMeter for ThisComputeMeter {
    /// Deduct `amount` units from the budget.
    ///
    /// When the request overshoots, the balance is clamped to zero and
    /// `ComputationalBudgetExceeded` is returned (same saturating behavior
    /// as before, expressed via `checked_sub`).
    fn consume(&mut self, amount: u64) -> Result<(), InstructionError> {
        match self.remaining.checked_sub(amount) {
            Some(left) => {
                self.remaining = left;
                Ok(())
            }
            None => {
                self.remaining = 0;
                Err(InstructionError::ComputationalBudgetExceeded)
            }
        }
    }
    /// Units still available.
    fn get_remaining(&self) -> u64 {
        self.remaining
    }
}
/// Concrete `InvokeContext` used while executing a single top-level
/// instruction; carries the invocation stack, pre-execution account
/// snapshots, and the shared caches/budgets the programs need.
pub struct ThisInvokeContext<'a> {
    // Invocation stack; last entry is the currently executing program
    program_ids: Vec<Pubkey>,
    rent: Rent,
    // Snapshots used by verify_and_update after each (inner) instruction
    pre_accounts: Vec<PreAccount>,
    executables: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
    account_deps: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
    // Builtin program entrypoints available for dispatch
    programs: &'a [(Pubkey, ProcessInstructionWithContext)],
    logger: Rc<RefCell<dyn Logger>>,
    bpf_compute_budget: BpfComputeBudget,
    compute_meter: Rc<RefCell<dyn ComputeMeter>>,
    executors: Rc<RefCell<Executors>>,
    instruction_recorder: Option<InstructionRecorder>,
    feature_set: Arc<FeatureSet>,
    pub timings: ExecuteDetailsTimings,
    // Used (with `ancestors`) to lazily load sysvar account data
    account_db: Arc<Accounts>,
    ancestors: &'a Ancestors,
    // Lazily-populated sysvar cache: (id, data-if-found)
    #[allow(clippy::type_complexity)]
    sysvars: RefCell<Vec<(Pubkey, Option<Rc<Vec<u8>>>)>>,
    // Fork-specific extension: voter-group membership oracle
    voter_grp : &'a dyn VoterGroup
}
impl<'a> ThisInvokeContext<'a> {
    /// Build a context rooted at `program_id`, with the invocation stack
    /// pre-sized to the configured max depth and a fresh compute meter
    /// charged with `bpf_compute_budget.max_units`.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        program_id: &Pubkey,
        rent: Rent,
        pre_accounts: Vec<PreAccount>,
        executables: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
        account_deps: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
        programs: &'a [(Pubkey, ProcessInstructionWithContext)],
        log_collector: Option<Rc<LogCollector>>,
        bpf_compute_budget: BpfComputeBudget,
        executors: Rc<RefCell<Executors>>,
        instruction_recorder: Option<InstructionRecorder>,
        feature_set: Arc<FeatureSet>,
        account_db: Arc<Accounts>,
        ancestors: &'a Ancestors,
        voter_grp: &'a dyn VoterGroup,
    ) -> Self {
        let mut program_ids = Vec::with_capacity(bpf_compute_budget.max_invoke_depth);
        program_ids.push(*program_id);
        Self {
            program_ids,
            rent,
            pre_accounts,
            executables,
            account_deps,
            programs,
            logger: Rc::new(RefCell::new(ThisLogger { log_collector })),
            bpf_compute_budget,
            compute_meter: Rc::new(RefCell::new(ThisComputeMeter {
                remaining: bpf_compute_budget.max_units,
            })),
            executors,
            instruction_recorder,
            feature_set,
            timings: ExecuteDetailsTimings::default(),
            account_db,
            ancestors,
            // Sysvar cache starts empty and fills on first get_sysvar_data
            sysvars: RefCell::new(vec![]),
            voter_grp,
        }
    }
}
impl<'a> InvokeContext for ThisInvokeContext<'a> {
    /// Push `key` onto the invocation stack, enforcing the max call depth
    /// and forbidding reentrancy (except direct self-recursion).
    fn push(&mut self, key: &Pubkey) -> Result<(), InstructionError> {
        if self.program_ids.len() > self.bpf_compute_budget.max_invoke_depth {
            return Err(InstructionError::CallDepth);
        }
        if self.program_ids.contains(key) && self.program_ids.last() != Some(key) {
            // Reentrancy not allowed unless caller is calling itself
            return Err(InstructionError::ReentrancyNotAllowed);
        }
        self.program_ids.push(*key);
        Ok(())
    }
    /// Pop the current program off the invocation stack.
    fn pop(&mut self) {
        self.program_ids.pop();
    }
    /// Current invocation depth (1 for the top-level instruction).
    fn invoke_depth(&self) -> usize {
        self.program_ids.len()
    }
    /// Verify the caller's writes and refresh the pre-account snapshots,
    /// attributing the changes to the program on top of the stack.
    fn verify_and_update(
        &mut self,
        message: &Message,
        instruction: &CompiledInstruction,
        accounts: &[Rc<RefCell<AccountSharedData>>],
        caller_write_privileges: Option<&[bool]>,
    ) -> Result<(), InstructionError> {
        match self.program_ids.last() {
            Some(program_id) => MessageProcessor::verify_and_update(
                message,
                instruction,
                &mut self.pre_accounts,
                accounts,
                program_id,
                &self.rent,
                caller_write_privileges,
                &mut self.timings,
                self.feature_set.is_active(&demote_sysvar_write_locks::id()),
                self.feature_set.is_active(&updated_verify_policy::id()),
            ),
            None => Err(InstructionError::GenericError), // Should never happen
        }
    }
    /// The program currently executing (top of the invocation stack).
    fn get_caller(&self) -> Result<&Pubkey, InstructionError> {
        self.program_ids
            .last()
            .ok_or(InstructionError::GenericError)
    }
    fn get_programs(&self) -> &[(Pubkey, ProcessInstructionWithContext)] {
        self.programs
    }
    fn get_logger(&self) -> Rc<RefCell<dyn Logger>> {
        self.logger.clone()
    }
    fn get_bpf_compute_budget(&self) -> &BpfComputeBudget {
        &self.bpf_compute_budget
    }
    fn get_compute_meter(&self) -> Rc<RefCell<dyn ComputeMeter>> {
        self.compute_meter.clone()
    }
    /// Cache `executor` for `pubkey` (marks the shared cache dirty).
    fn add_executor(&self, pubkey: &Pubkey, executor: Arc<dyn Executor>) {
        self.executors.borrow_mut().insert(*pubkey, executor);
    }
    fn get_executor(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> {
        self.executors.borrow().get(&pubkey)
    }
    /// Record `instruction` for later inspection, if a recorder is attached.
    fn record_instruction(&self, instruction: &Instruction) {
        if let Some(recorder) = &self.instruction_recorder {
            recorder.record_instruction(instruction.clone());
        }
    }
    fn is_feature_active(&self, feature_id: &Pubkey) -> bool {
        self.feature_set.is_active(feature_id)
    }
    /// Look up an account visible to this invocation.
    ///
    /// The search order is feature-gated: with cpi_share_ro_and_exec_accounts
    /// active it is executables -> account_deps -> pre_accounts, otherwise
    /// pre_accounts -> account_deps. The order is load-bearing; do not
    /// "simplify" it.
    fn get_account(&self, pubkey: &Pubkey) -> Option<Rc<RefCell<AccountSharedData>>> {
        if self.is_feature_active(&cpi_share_ro_and_exec_accounts::id()) {
            if let Some((_, account)) = self.executables.iter().find(|(key, _)| key == pubkey) {
                Some(account.clone())
            } else if let Some((_, account)) =
                self.account_deps.iter().find(|(key, _)| key == pubkey)
            {
                Some(account.clone())
            } else {
                self.pre_accounts
                    .iter()
                    .find(|pre| pre.key == *pubkey)
                    .map(|pre| pre.account.clone())
            }
        } else {
            if let Some(account) = self.pre_accounts.iter().find_map(|pre| {
                if pre.key == *pubkey {
                    Some(pre.account.clone())
                } else {
                    None
                }
            }) {
                return Some(account);
            }
            self.account_deps.iter().find_map(|(key, account)| {
                if key == pubkey {
                    Some(account.clone())
                } else {
                    None
                }
            })
        }
    }
    /// Accumulate per-phase timings reported by the executing program.
    fn update_timing(
        &mut self,
        serialize_us: u64,
        create_vm_us: u64,
        execute_us: u64,
        deserialize_us: u64,
    ) {
        self.timings.serialize_us += serialize_us;
        self.timings.create_vm_us += create_vm_us;
        self.timings.execute_us += execute_us;
        self.timings.deserialize_us += deserialize_us;
    }
    /// Fetch a sysvar account's raw data, memoized in `self.sysvars`.
    /// Returns None if the sysvar cannot be loaded or the cache is already
    /// (re-entrantly) borrowed.
    fn get_sysvar_data(&self, id: &Pubkey) -> Option<Rc<Vec<u8>>> {
        if let Ok(mut sysvars) = self.sysvars.try_borrow_mut() {
            // Try share from cache
            let mut result = sysvars
                .iter()
                .find_map(|(key, sysvar)| if id == key { sysvar.clone() } else { None });
            if result.is_none() {
                // Load it
                result = self
                    .account_db
                    .load_slow(self.ancestors, id)
                    .map(|(account, _)| Rc::new(account.data().clone()));
                // Cache it (a failed load is cached as None, too)
                sysvars.push((*id, result.clone()));
            }
            result
        } else {
            None
        }
    }
    /// Fork-specific extension: expose the voter-group membership oracle.
    fn voter_group(&self) -> & dyn VoterGroup {
        self.voter_grp
    }
}
/// Logger that mirrors program log messages to the `log` crate and, when
/// one is attached, to a transaction-scoped `LogCollector`.
pub struct ThisLogger {
    log_collector: Option<Rc<LogCollector>>,
}
impl Logger for ThisLogger {
    /// Logging is worthwhile if the process logger accepts info-level
    /// output or a collector is attached.
    fn log_enabled(&self) -> bool {
        log_enabled!(log::Level::Info) || self.log_collector.is_some()
    }
    /// Emit `message` via `debug!` and forward it to the collector if any.
    fn log(&self, message: &str) {
        debug!("{}", message);
        if let Some(collector) = self.log_collector.as_ref() {
            collector.log(message);
        }
    }
}
/// Executes all instructions of a message against the loaded accounts.
///
/// Both fields are `#[serde(skip)]`: the registered entrypoints and the
/// native loader are runtime-only state and are rebuilt via `Default`
/// after deserialization.
#[derive(Deserialize, Serialize)]
pub struct MessageProcessor {
    #[serde(skip)]
    programs: Vec<(Pubkey, ProcessInstructionWithContext)>,
    #[serde(skip)]
    native_loader: NativeLoader,
}
// Hand-written Debug: function pointers cannot derive Debug, so the
// entrypoints are rendered as "pubkey: address" strings via a shadow struct.
impl std::fmt::Debug for MessageProcessor {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Shadow struct with the same shape but Debug-friendly field types
        #[derive(Debug)]
        struct MessageProcessor<'a> {
            programs: Vec<String>,
            native_loader: &'a NativeLoader,
        }
        // These are just type aliases for work around of Debug-ing above pointers
        type ErasedProcessInstructionWithContext = fn(
            &'static Pubkey,
            &'static [KeyedAccount<'static>],
            &'static [u8],
            &'static mut dyn InvokeContext,
        ) -> Result<(), InstructionError>;
        // rustc doesn't compile due to bug without this work around
        // https://github.com/rust-lang/rust/issues/50280
        // https://users.rust-lang.org/t/display-function-pointer/17073/2
        let processor = MessageProcessor {
            programs: self
                .programs
                .iter()
                .map(|(pubkey, instruction)| {
                    let erased_instruction: ErasedProcessInstructionWithContext = *instruction;
                    format!("{}: {:p}", pubkey, erased_instruction)
                })
                .collect::<Vec<_>>(),
            native_loader: &self.native_loader,
        };
        write!(f, "{:?}", processor)
    }
}
impl Default for MessageProcessor {
fn default() -> Self {
Self {
programs: vec![],
native_loader: NativeLoader::default(),
}
}
}
impl Clone for MessageProcessor {
    fn clone(&self) -> Self {
        // NOTE(review): `native_loader` is replaced with a fresh default
        // rather than cloned — presumably its internal state must not be
        // shared between clones; confirm before changing.
        Self {
            programs: self.programs.clone(),
            native_loader: NativeLoader::default(),
        }
    }
}
// Only built when the compiler supports specialization (ABI-digest builds).
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl ::solana_frozen_abi::abi_example::AbiExample for MessageProcessor {
    fn example() -> Self {
        // MessageProcessor's fields are #[serde(skip)]-ed and not Serialize
        // so, just rely on Default anyway.
        MessageProcessor::default()
    }
}
impl MessageProcessor {
/// Add a static entrypoint to intercept instructions before the dynamic loader.
pub fn add_program(
&mut self,
program_id: Pubkey,
process_instruction: ProcessInstructionWithContext,
) {
match self.programs.iter_mut().find(|(key, _)| program_id == *key) {
Some((_, processor)) => *processor = process_instruction,
None => self.programs.push((program_id, process_instruction)),
}
}
    /// Register a loader entrypoint; currently identical to `add_program`.
    pub fn add_loader(
        &mut self,
        program_id: Pubkey,
        process_instruction: ProcessInstructionWithContext,
    ) {
        self.add_program(program_id, process_instruction);
    }
/// Create the KeyedAccounts that will be passed to the program
fn create_keyed_accounts<'a>(
message: &'a Message,
instruction: &'a CompiledInstruction,
executable_accounts: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
accounts: &'a [Rc<RefCell<AccountSharedData>>],
demote_sysvar_write_locks: bool,
) -> Vec<KeyedAccount<'a>> {
let mut keyed_accounts = create_keyed_readonly_accounts(&executable_accounts);
let mut keyed_accounts2: Vec<_> = instruction
.accounts
.iter()
.map(|&index| {
let is_signer = message.is_signer(index as usize);
let index = index as usize;
let key = &message.account_keys[index];
let account = &accounts[index];
if message.is_writable(index, demote_sysvar_write_locks) {
KeyedAccount::new(key, is_signer, account)
} else {
KeyedAccount::new_readonly(key, is_signer, account)
}
})
.collect();
keyed_accounts.append(&mut keyed_accounts2);
keyed_accounts
}
    /// Process an instruction
    /// This method calls the instruction's program entrypoint method
    ///
    /// Dispatch, based on the first keyed account (the program account):
    /// - owned by the native loader and registered here  -> call the builtin
    /// - owned by the native loader but not registered   -> native loader
    /// - owned by a registered (builtin) loader          -> call that loader
    /// - anything else                                   -> UnsupportedProgramId
    fn process_instruction(
        &self,
        program_id: &Pubkey,
        keyed_accounts: &[KeyedAccount],
        instruction_data: &[u8],
        invoke_context: &mut dyn InvokeContext,
    ) -> Result<(), InstructionError> {
        if let Some(root_account) = keyed_accounts.iter().next() {
            let root_id = root_account.unsigned_key();
            if native_loader::check_id(&root_account.owner()?) {
                for (id, process_instruction) in &self.programs {
                    if id == root_id {
                        // Call the builtin program
                        // (the program account itself is stripped off)
                        return process_instruction(
                            &program_id,
                            &keyed_accounts[1..],
                            instruction_data,
                            invoke_context,
                        );
                    }
                }
                // Call the program via the native loader
                return self.native_loader.process_instruction(
                    &native_loader::id(),
                    keyed_accounts,
                    instruction_data,
                    invoke_context,
                );
            } else {
                let owner_id = &root_account.owner()?;
                for (id, process_instruction) in &self.programs {
                    if id == owner_id {
                        // Call the program via a builtin loader
                        // (loader gets the full account list, program included)
                        return process_instruction(
                            &program_id,
                            keyed_accounts,
                            instruction_data,
                            invoke_context,
                        );
                    }
                }
            }
        }
        Err(InstructionError::UnsupportedProgramId)
    }
    /// Build the `Message` for a cross-program invocation and validate that
    /// the caller may issue it.
    ///
    /// Per referenced account: it must be visible to the caller, a
    /// read-only account may not become writable, and signer status must
    /// come from the parent instruction or a program-derived signer in
    /// `signers`. The callee program account must be present and
    /// executable.
    ///
    /// Returns the single-instruction message, the callee program id, and
    /// that id's index within the message.
    pub fn create_message(
        instruction: &Instruction,
        keyed_accounts: &[&KeyedAccount],
        signers: &[Pubkey],
        invoke_context: &Ref<&mut dyn InvokeContext>,
    ) -> Result<(Message, Pubkey, usize), InstructionError> {
        // Check for privilege escalation
        for account in instruction.accounts.iter() {
            let keyed_account = keyed_accounts
                .iter()
                .find_map(|keyed_account| {
                    if &account.pubkey == keyed_account.unsigned_key() {
                        Some(keyed_account)
                    } else {
                        None
                    }
                })
                .ok_or_else(|| {
                    ic_msg!(
                        invoke_context,
                        "Instruction references an unknown account {}",
                        account.pubkey
                    );
                    InstructionError::MissingAccount
                })?;
            // Readonly account cannot become writable
            if account.is_writable && !keyed_account.is_writable() {
                ic_msg!(
                    invoke_context,
                    "{}'s writable privilege escalated",
                    account.pubkey
                );
                return Err(InstructionError::PrivilegeEscalation);
            }
            if account.is_signer && // If message indicates account is signed
            !( // one of the following needs to be true:
                keyed_account.signer_key().is_some() // Signed in the parent instruction
                || signers.contains(&account.pubkey) // Signed by the program
            ) {
                ic_msg!(
                    invoke_context,
                    "{}'s signer privilege escalated",
                    account.pubkey
                );
                return Err(InstructionError::PrivilegeEscalation);
            }
        }
        // validate the caller has access to the program account and that it is executable
        let program_id = instruction.program_id;
        match keyed_accounts
            .iter()
            .find(|keyed_account| &program_id == keyed_account.unsigned_key())
        {
            Some(keyed_account) => {
                if !keyed_account.executable()? {
                    ic_msg!(
                        invoke_context,
                        "Account {} is not executable",
                        keyed_account.unsigned_key()
                    );
                    return Err(InstructionError::AccountNotExecutable);
                }
            }
            None => {
                ic_msg!(invoke_context, "Unknown program {}", program_id);
                return Err(InstructionError::MissingAccount);
            }
        }
        let message = Message::new(&[instruction.clone()], None);
        let program_id_index = message.instructions[0].program_id_index as usize;
        Ok((message, program_id, program_id_index))
    }
    /// Entrypoint for a cross-program invocation from a native program
    ///
    /// Steps:
    /// 1. Derive program-address signers from `signers_seeds`.
    /// 2. Validate privileges and build the callee message (`create_message`).
    /// 3. Gather the accounts the message references plus the callee's
    ///    executable account(s), including upgradeable-program programdata.
    /// 4. Process the instruction, then copy writable, non-executable
    ///    account changes back into the caller's account refs; realloc is
    ///    rejected except for newly created (zero-length) accounts.
    pub fn native_invoke(
        invoke_context: &mut dyn InvokeContext,
        instruction: Instruction,
        keyed_accounts: &[&KeyedAccount],
        signers_seeds: &[&[&[u8]]],
    ) -> Result<(), InstructionError> {
        let invoke_context = RefCell::new(invoke_context);
        let (
            message,
            executables,
            accounts,
            account_refs,
            caller_write_privileges,
            demote_sysvar_write_locks,
        ) = {
            let invoke_context = invoke_context.borrow();
            let caller_program_id = invoke_context.get_caller()?;
            // Translate and verify caller's data
            let signers = signers_seeds
                .iter()
                .map(|seeds| Pubkey::create_program_address(&seeds, caller_program_id))
                .collect::<Result<Vec<_>, solana_sdk::pubkey::PubkeyError>>()?;
            let mut caller_write_privileges = keyed_accounts
                .iter()
                .map(|keyed_account| keyed_account.is_writable())
                .collect::<Vec<bool>>();
            // Slot 0 corresponds to the callee program id, never writable
            caller_write_privileges.insert(0, false);
            let (message, callee_program_id, _) =
                Self::create_message(&instruction, &keyed_accounts, &signers, &invoke_context)?;
            let mut accounts = vec![];
            let mut account_refs = vec![];
            'root: for account_key in message.account_keys.iter() {
                for keyed_account in keyed_accounts {
                    if account_key == keyed_account.unsigned_key() {
                        accounts.push(Rc::new(keyed_account.account.clone()));
                        account_refs.push(keyed_account);
                        continue 'root;
                    }
                }
                ic_msg!(
                    invoke_context,
                    "Instruction references an unknown account {}",
                    account_key
                );
                return Err(InstructionError::MissingAccount);
            }
            // Process instruction
            invoke_context.record_instruction(&instruction);
            let program_account =
                invoke_context
                    .get_account(&callee_program_id)
                    .ok_or_else(|| {
                        ic_msg!(invoke_context, "Unknown program {}", callee_program_id);
                        InstructionError::MissingAccount
                    })?;
            if !program_account.borrow().executable {
                ic_msg!(
                    invoke_context,
                    "Account {} is not executable",
                    callee_program_id
                );
                return Err(InstructionError::AccountNotExecutable);
            }
            // Upgradeable programs keep their code in a separate
            // programdata account, which must also be passed along
            let programdata_executable =
                if program_account.borrow().owner == bpf_loader_upgradeable::id() {
                    if let UpgradeableLoaderState::Program {
                        programdata_address,
                    } = program_account.borrow().state()?
                    {
                        if let Some(account) = invoke_context.get_account(&programdata_address) {
                            Some((programdata_address, account))
                        } else {
                            ic_msg!(
                                invoke_context,
                                "Unknown upgradeable programdata account {}",
                                programdata_address,
                            );
                            return Err(InstructionError::MissingAccount);
                        }
                    } else {
                        ic_msg!(
                            invoke_context,
                            "Upgradeable program account state not valid {}",
                            callee_program_id,
                        );
                        return Err(InstructionError::MissingAccount);
                    }
                } else {
                    None
                };
            let mut executables = vec![(callee_program_id, program_account)];
            if let Some(programdata) = programdata_executable {
                executables.push(programdata);
            }
            (
                message,
                executables,
                accounts,
                account_refs,
                caller_write_privileges,
                invoke_context.is_feature_active(&demote_sysvar_write_locks::id()),
            )
        };
        #[allow(clippy::deref_addrof)]
        MessageProcessor::process_cross_program_instruction(
            &message,
            &executables,
            &accounts,
            &caller_write_privileges,
            *(&mut *(invoke_context.borrow_mut())),
        )?;
        // Copy results back to caller
        {
            let invoke_context = invoke_context.borrow();
            for (i, (account, account_ref)) in accounts.iter().zip(account_refs).enumerate() {
                let account = account.borrow();
                if message.is_writable(i, demote_sysvar_write_locks) && !account.executable {
                    account_ref.try_account_ref_mut()?.lamports = account.lamports;
                    account_ref.try_account_ref_mut()?.owner = account.owner;
                    if account_ref.data_len()? != account.data().len()
                        && account_ref.data_len()? != 0
                    {
                        // Only support for `CreateAccount` at this time.
                        // Need a way to limit total realloc size across multiple CPI calls
                        ic_msg!(
                            invoke_context,
                            "Inner instructions do not support realloc, only SystemProgram::CreateAccount",
                        );
                        return Err(InstructionError::InvalidRealloc);
                    }
                    account_ref
                        .try_account_ref_mut()?
                        .set_data(account.data().clone());
                }
            }
        }
        Ok(())
    }
    /// Process a cross-program instruction
    /// This method calls the instruction's program entrypoint function
    ///
    /// Sequence: verify the caller's writes so far, push the callee onto
    /// the invocation stack, execute, verify the callee's writes, pop.
    /// The pop runs whether execution succeeded or failed.
    pub fn process_cross_program_instruction(
        message: &Message,
        executable_accounts: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
        accounts: &[Rc<RefCell<AccountSharedData>>],
        caller_write_privileges: &[bool],
        invoke_context: &mut dyn InvokeContext,
    ) -> Result<(), InstructionError> {
        if let Some(instruction) = message.instructions.get(0) {
            let program_id = instruction.program_id(&message.account_keys);
            // Verify the calling program hasn't misbehaved
            invoke_context.verify_and_update(
                message,
                instruction,
                accounts,
                Some(caller_write_privileges),
            )?;
            let demote_sysvar_write_locks =
                invoke_context.is_feature_active(&demote_sysvar_write_locks::id());
            // Construct keyed accounts
            let keyed_accounts = Self::create_keyed_accounts(
                message,
                instruction,
                executable_accounts,
                accounts,
                demote_sysvar_write_locks,
            );
            // Invoke callee
            invoke_context.push(program_id)?;
            // Fresh processor seeded with the context's registered builtins
            let mut message_processor = MessageProcessor::default();
            for (program_id, process_instruction) in invoke_context.get_programs().iter() {
                message_processor.add_program(*program_id, *process_instruction);
            }
            let mut result = message_processor.process_instruction(
                program_id,
                &keyed_accounts,
                &instruction.data,
                invoke_context,
            );
            if result.is_ok() {
                // Verify the called program has not misbehaved
                result = invoke_context.verify_and_update(message, instruction, accounts, None);
            }
            invoke_context.pop();
            result
        } else {
            // This function is always called with a valid instruction, if that changes return an error
            Err(InstructionError::GenericError)
        }
    }
/// Record the initial state of the accounts so that they can be compared
/// after the instruction is processed
pub fn create_pre_accounts(
message: &Message,
instruction: &CompiledInstruction,
accounts: &[Rc<RefCell<AccountSharedData>>],
) -> Vec<PreAccount> {
let mut pre_accounts = Vec::with_capacity(instruction.accounts.len());
{
let mut work = |_unique_index: usize, account_index: usize| {
let key = &message.account_keys[account_index];
let account = accounts[account_index].borrow();
pre_accounts.push(PreAccount::new(key, &account));
Ok(())
};
let _ = instruction.visit_each_account(&mut work);
}
pre_accounts
}
/// Verify there are no outstanding borrows
pub fn verify_account_references(
accounts: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
) -> Result<(), InstructionError> {
for (_, account) in accounts.iter() {
account
.try_borrow_mut()
.map_err(|_| InstructionError::AccountBorrowOutstanding)?;
}
Ok(())
}
    /// Verify the results of an instruction
    ///
    /// Checks every executable account for outstanding borrows, runs
    /// `PreAccount::verify` on each referenced account, and finally checks
    /// that the instruction conserved the total lamport supply.
    pub fn verify(
        message: &Message,
        instruction: &CompiledInstruction,
        pre_accounts: &[PreAccount],
        executable_accounts: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
        accounts: &[Rc<RefCell<AccountSharedData>>],
        rent: &Rent,
        timings: &mut ExecuteDetailsTimings,
        demote_sysvar_write_locks: bool,
        updated_verify_policy: bool,
    ) -> Result<(), InstructionError> {
        // Verify all executable accounts have zero outstanding refs
        Self::verify_account_references(executable_accounts)?;
        // Verify the per-account instruction results
        // (u128 sums cannot overflow when adding u64 balances)
        let (mut pre_sum, mut post_sum) = (0_u128, 0_u128);
        {
            let program_id = instruction.program_id(&message.account_keys);
            let mut work = |unique_index: usize, account_index: usize| {
                {
                    // Verify account has no outstanding references
                    let _ = accounts[account_index]
                        .try_borrow_mut()
                        .map_err(|_| InstructionError::AccountBorrowOutstanding)?;
                }
                let account = accounts[account_index].borrow();
                pre_accounts[unique_index].verify(
                    &program_id,
                    message.is_writable(account_index, demote_sysvar_write_locks),
                    rent,
                    &account,
                    timings,
                    updated_verify_policy,
                )?;
                pre_sum += u128::from(pre_accounts[unique_index].lamports());
                post_sum += u128::from(account.lamports);
                Ok(())
            };
            instruction.visit_each_account(&mut work)?;
        }
        // Verify that the total sum of all the lamports did not change
        if pre_sum != post_sum {
            return Err(InstructionError::UnbalancedInstruction);
        }
        Ok(())
    }
    /// Verify the results of a cross-program instruction
    ///
    /// Like `verify`, but also refreshes the `PreAccount` snapshots of
    /// writable, non-executable accounts so subsequent verifications
    /// compare against the post-CPI state. Writability comes from
    /// `caller_write_privileges` when supplied, else from the message.
    #[allow(clippy::too_many_arguments)]
    fn verify_and_update(
        message: &Message,
        instruction: &CompiledInstruction,
        pre_accounts: &mut [PreAccount],
        accounts: &[Rc<RefCell<AccountSharedData>>],
        program_id: &Pubkey,
        rent: &Rent,
        caller_write_privileges: Option<&[bool]>,
        timings: &mut ExecuteDetailsTimings,
        demote_sysvar_write_locks: bool,
        updated_verify_policy: bool,
    ) -> Result<(), InstructionError> {
        // Verify the per-account instruction results
        let (mut pre_sum, mut post_sum) = (0_u128, 0_u128);
        let mut work = |_unique_index: usize, account_index: usize| {
            if account_index < message.account_keys.len() && account_index < accounts.len() {
                let key = &message.account_keys[account_index];
                let account = &accounts[account_index];
                let is_writable = if let Some(caller_write_privileges) = caller_write_privileges {
                    caller_write_privileges[account_index]
                } else {
                    message.is_writable(account_index, demote_sysvar_write_locks)
                };
                // Find the matching PreAccount
                for pre_account in pre_accounts.iter_mut() {
                    if *key == pre_account.key() {
                        {
                            // Verify account has no outstanding references
                            let _ = account
                                .try_borrow_mut()
                                .map_err(|_| InstructionError::AccountBorrowOutstanding)?;
                        }
                        let account = account.borrow();
                        pre_account.verify(
                            &program_id,
                            is_writable,
                            &rent,
                            &account,
                            timings,
                            updated_verify_policy,
                        )?;
                        pre_sum += u128::from(pre_account.lamports());
                        post_sum += u128::from(account.lamports);
                        if is_writable && !pre_account.executable() {
                            // Refresh the snapshot so later verifications
                            // compare against this post-CPI state
                            pre_account.update(&account);
                        }
                        return Ok(());
                    }
                }
            }
            Err(InstructionError::MissingAccount)
        };
        instruction.visit_each_account(&mut work)?;
        // Also verify the program account itself
        work(0, instruction.program_id_index as usize)?;
        // Verify that the total sum of all the lamports did not change
        if pre_sum != post_sum {
            return Err(InstructionError::UnbalancedInstruction);
        }
        Ok(())
    }
    /// Execute an instruction
    /// This method calls the instruction's program entrypoint method and verifies that the result of
    /// the call does not violate the bank's accounting rules.
    /// The accounts are committed back to the bank only if this function returns Ok(_).
    ///
    /// Sequence: patch the instructions sysvar (if enabled), snapshot the
    /// accounts, build a fresh `ThisInvokeContext`, run the program, then
    /// `verify` the post-state and accumulate timings.
    #[allow(clippy::too_many_arguments)]
    fn execute_instruction(
        &self,
        message: &Message,
        instruction: &CompiledInstruction,
        executable_accounts: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
        accounts: &[Rc<RefCell<AccountSharedData>>],
        account_deps: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
        rent_collector: &RentCollector,
        log_collector: Option<Rc<LogCollector>>,
        executors: Rc<RefCell<Executors>>,
        instruction_recorder: Option<InstructionRecorder>,
        instruction_index: usize,
        feature_set: Arc<FeatureSet>,
        bpf_compute_budget: BpfComputeBudget,
        timings: &mut ExecuteDetailsTimings,
        demote_sysvar_write_locks: bool,
        account_db: Arc<Accounts>,
        ancestors: &Ancestors,
        voter_grp : &dyn VoterGroup
    ) -> Result<(), InstructionError> {
        // Fixup the special instructions key if present
        // before the account pre-values are taken care of
        if feature_set.is_active(&instructions_sysvar_enabled::id()) {
            for (i, key) in message.account_keys.iter().enumerate() {
                if instructions::check_id(key) {
                    let mut mut_account_ref = accounts[i].borrow_mut();
                    instructions::store_current_index(
                        mut_account_ref.data_as_mut_slice(),
                        instruction_index as u16,
                    );
                    break;
                }
            }
        }
        // Snapshot accounts *after* the sysvar fixup so verify() does not
        // flag the index write as an illegal modification
        let pre_accounts = Self::create_pre_accounts(message, instruction, accounts);
        let program_id = instruction.program_id(&message.account_keys);
        let mut invoke_context = ThisInvokeContext::new(
            program_id,
            rent_collector.rent,
            pre_accounts,
            executable_accounts,
            account_deps,
            &self.programs,
            log_collector,
            bpf_compute_budget,
            executors,
            instruction_recorder,
            feature_set,
            account_db,
            ancestors,
            voter_grp,
        );
        let keyed_accounts = Self::create_keyed_accounts(
            message,
            instruction,
            executable_accounts,
            accounts,
            demote_sysvar_write_locks,
        );
        self.process_instruction(
            program_id,
            &keyed_accounts,
            &instruction.data,
            &mut invoke_context,
        )?;
        Self::verify(
            message,
            instruction,
            &invoke_context.pre_accounts,
            executable_accounts,
            accounts,
            &rent_collector.rent,
            timings,
            demote_sysvar_write_locks,
            invoke_context.is_feature_active(&updated_verify_policy::id()),
        )?;
        timings.accumulate(&invoke_context.timings);
        Ok(())
    }
    /// Process a message.
    /// This method calls each instruction in the message over the set of loaded Accounts
    /// The accounts are committed back to the bank only if every instruction succeeds
    ///
    /// Instructions run in order; the first failure aborts the message and
    /// is reported as `TransactionError::InstructionError` carrying the
    /// failing instruction's index.
    #[allow(clippy::too_many_arguments)]
    #[allow(clippy::type_complexity)]
    pub fn process_message(
        &self,
        message: &Message,
        loaders: &[Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>],
        accounts: &[Rc<RefCell<AccountSharedData>>],
        account_deps: &[(Pubkey, Rc<RefCell<AccountSharedData>>)],
        rent_collector: &RentCollector,
        log_collector: Option<Rc<LogCollector>>,
        executors: Rc<RefCell<Executors>>,
        instruction_recorders: Option<&[InstructionRecorder]>,
        feature_set: Arc<FeatureSet>,
        bpf_compute_budget: BpfComputeBudget,
        timings: &mut ExecuteDetailsTimings,
        account_db: Arc<Accounts>,
        ancestors: &Ancestors,
        voter_grp: &dyn VoterGroup,
    ) -> Result<(), TransactionError> {
        let demote_sysvar_write_locks = feature_set.is_active(&demote_sysvar_write_locks::id());
        for (instruction_index, instruction) in message.instructions.iter().enumerate() {
            // Each instruction gets its own recorder (when recording)
            let instruction_recorder = instruction_recorders
                .as_ref()
                .map(|recorders| recorders[instruction_index].clone());
            self.execute_instruction(
                message,
                instruction,
                &loaders[instruction_index],
                accounts,
                account_deps,
                rent_collector,
                log_collector.clone(),
                executors.clone(),
                instruction_recorder,
                instruction_index,
                feature_set.clone(),
                bpf_compute_budget,
                timings,
                demote_sysvar_write_locks,
                account_db.clone(),
                ancestors,
                voter_grp,
            )
            .map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::{
account::Account,
instruction::{AccountMeta, Instruction, InstructionError},
message::Message,
native_loader::create_loadable_account_for_test,
};
    /// Test double for `VoterGroup` that returns a fixed membership answer.
    struct MockVoterGroup {
        // Value returned by every `in_group` query
        in_group: bool,
    }
    impl MockVoterGroup {
        // Constructs a mock that always reports membership
        pub fn new() -> Self {
            Self {
                in_group: true,
            }
        }
    }
    impl VoterGroup for MockVoterGroup {
        // Slot, hash, and pubkey are ignored; the stored flag is returned
        fn in_group(&self, _: solana_sdk::clock::Slot, _: solana_sdk::hash::Hash, _: solana_sdk::pubkey::Pubkey) -> bool {
            self.in_group
        }
    } #[test]
    fn test_invoke_context() {
        // Depth of the simulated invocation stack; one program/account pair is
        // prepared per level.
        const MAX_DEPTH: usize = 10;
        let mut program_ids = vec![];
        let mut keys = vec![];
        let mut pre_accounts = vec![];
        let mut accounts = vec![];
        for i in 0..MAX_DEPTH {
            program_ids.push(solana_sdk::pubkey::new_rand());
            keys.push(solana_sdk::pubkey::new_rand());
            accounts.push(Rc::new(RefCell::new(AccountSharedData::new(
                i as u64,
                1,
                &program_ids[i],
            ))));
            pre_accounts.push(PreAccount::new(&keys[i], &accounts[i].borrow()))
        }
        // One extra pre-account per program id, all cloned from the same dummy.
        let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::Pubkey::default());
        for program_id in program_ids.iter() {
            pre_accounts.push(PreAccount::new(program_id, &account.clone()));
        }
        let ancestors = Ancestors::default();
        let mvg = MockVoterGroup::new();
        let mut invoke_context = ThisInvokeContext::new(
            &program_ids[0],
            Rent::default(),
            pre_accounts,
            &[],
            &[],
            &[],
            None,
            BpfComputeBudget::default(),
            Rc::new(RefCell::new(Executors::default())),
            None,
            Arc::new(FeatureSet::all_enabled()),
            Arc::new(Accounts::default()),
            &ancestors,
            &mvg,
        );
        // Check call depth increases and has a limit
        let mut depth_reached = 1;
        for program_id in program_ids.iter().skip(1) {
            if Err(InstructionError::CallDepth) == invoke_context.push(program_id) {
                break;
            }
            depth_reached += 1;
        }
        assert_ne!(depth_reached, 0);
        assert!(depth_reached < MAX_DEPTH);
        // Mock each invocation
        // Walk back down the stack, verifying writes at each level.
        for owned_index in (1..depth_reached).rev() {
            let not_owned_index = owned_index - 1;
            let metas = vec![
                AccountMeta::new(keys[not_owned_index], false),
                AccountMeta::new(keys[owned_index], false),
            ];
            let message = Message::new(
                &[Instruction::new_with_bytes(
                    program_ids[owned_index],
                    &[0],
                    metas,
                )],
                None,
            );
            // modify account owned by the program
            accounts[owned_index].borrow_mut().data_as_mut_slice()[0] =
                (MAX_DEPTH + owned_index) as u8;
            let mut these_accounts = accounts[not_owned_index..owned_index + 1].to_vec();
            these_accounts.push(Rc::new(RefCell::new(AccountSharedData::new(
                1,
                1,
                &solana_sdk::pubkey::Pubkey::default(),
            ))));
            invoke_context
                .verify_and_update(&message, &message.instructions[0], &these_accounts, None)
                .unwrap();
            // verify_and_update must have propagated the owner's write into
            // its pre-account snapshot.
            assert_eq!(
                invoke_context.pre_accounts[owned_index]
                    .account
                    .borrow()
                    .data()[0],
                (MAX_DEPTH + owned_index) as u8
            );
            // modify account not owned by the program
            let data = accounts[not_owned_index].borrow_mut().data()[0];
            accounts[not_owned_index].borrow_mut().data_as_mut_slice()[0] =
                (MAX_DEPTH + not_owned_index) as u8;
            assert_eq!(
                invoke_context.verify_and_update(
                    &message,
                    &message.instructions[0],
                    &accounts[not_owned_index..owned_index + 1],
                    None
                ),
                Err(InstructionError::ExternalAccountDataModified)
            );
            // The rejected write must not leak into the pre-account snapshot.
            assert_eq!(
                invoke_context.pre_accounts[not_owned_index]
                    .account
                    .borrow()
                    .data()[0],
                data
            );
            // Restore the original byte before popping this stack frame.
            accounts[not_owned_index].borrow_mut().data_as_mut_slice()[0] = data;
            invoke_context.pop();
        }
    }
#[test]
fn test_is_zeroed() {
const ZEROS_LEN: usize = 1024;
let mut buf = [0; ZEROS_LEN];
assert_eq!(PreAccount::is_zeroed(&buf), true);
buf[0] = 1;
assert_eq!(PreAccount::is_zeroed(&buf), false);
let mut buf = [0; ZEROS_LEN - 1];
assert_eq!(PreAccount::is_zeroed(&buf), true);
buf[0] = 1;
assert_eq!(PreAccount::is_zeroed(&buf), false);
let mut buf = [0; ZEROS_LEN + 1];
assert_eq!(PreAccount::is_zeroed(&buf), true);
buf[0] = 1;
assert_eq!(PreAccount::is_zeroed(&buf), false);
let buf = vec![];
assert_eq!(PreAccount::is_zeroed(&buf), true);
}
#[test]
fn test_verify_account_references() {
let accounts = vec![(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(AccountSharedData::default())),
)];
assert!(MessageProcessor::verify_account_references(&accounts).is_ok());
let mut _borrowed = accounts[0].1.borrow();
assert_eq!(
MessageProcessor::verify_account_references(&accounts),
Err(InstructionError::AccountBorrowOutstanding)
);
}
    /// Builder-style fixture describing a single account transition that is
    /// checked with `PreAccount::verify`.
    struct Change {
        program_id: Pubkey, // program the change is attributed to
        is_writable: bool, // whether the account is writable in the message
        rent: Rent,
        pre: PreAccount, // account state before the instruction ran
        post: AccountSharedData, // account state after the instruction ran
    }
impl Change {
pub fn new(owner: &Pubkey, program_id: &Pubkey) -> Self {
Self {
program_id: *program_id,
rent: Rent::default(),
is_writable: true,
pre: PreAccount::new(
&solana_sdk::pubkey::new_rand(),
&AccountSharedData::from(Account {
owner: *owner,
lamports: std::u64::MAX,
data: vec![],
..Account::default()
}),
),
post: AccountSharedData::from(Account {
owner: *owner,
lamports: std::u64::MAX,
..Account::default()
}),
}
}
pub fn read_only(mut self) -> Self {
self.is_writable = false;
self
}
pub fn executable(mut self, pre: bool, post: bool) -> Self {
self.pre.account.borrow_mut().executable = pre;
self.post.executable = post;
self
}
pub fn lamports(mut self, pre: u64, post: u64) -> Self {
self.pre.account.borrow_mut().lamports = pre;
self.post.lamports = post;
self
}
pub fn owner(mut self, post: &Pubkey) -> Self {
self.post.owner = *post;
self
}
pub fn data(mut self, pre: Vec<u8>, post: Vec<u8>) -> Self {
self.pre.account.borrow_mut().set_data(pre);
self.post.set_data(post);
self
}
pub fn rent_epoch(mut self, pre: u64, post: u64) -> Self {
self.pre.account.borrow_mut().rent_epoch = pre;
self.post.rent_epoch = post;
self
}
pub fn verify(&self) -> Result<(), InstructionError> {
self.pre.verify(
&self.program_id,
self.is_writable,
&self.rent,
&self.post,
&mut ExecuteDetailsTimings::default(),
true,
)
}
}
#[test]
fn test_verify_account_changes_owner() {
let system_program_id = system_program::id();
let alice_program_id = solana_sdk::pubkey::new_rand();
let mallory_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&system_program_id, &system_program_id)
.owner(&alice_program_id)
.verify(),
Ok(()),
"system program should be able to change the account owner"
);
assert_eq!(
Change::new(&system_program_id, &system_program_id)
.owner(&alice_program_id)
.read_only()
.verify(),
Err(InstructionError::ModifiedProgramId),
"system program should not be able to change the account owner of a read-only account"
);
assert_eq!(
Change::new(&mallory_program_id, &system_program_id)
.owner(&alice_program_id)
.verify(),
Err(InstructionError::ModifiedProgramId),
"system program should not be able to change the account owner of a non-system account"
);
assert_eq!(
Change::new(&mallory_program_id, &mallory_program_id)
.owner(&alice_program_id)
.verify(),
Ok(()),
"mallory should be able to change the account owner, if she leaves clear data"
);
assert_eq!(
Change::new(&mallory_program_id, &mallory_program_id)
.owner(&alice_program_id)
.data(vec![42], vec![0])
.verify(),
Ok(()),
"mallory should be able to change the account owner, if she leaves clear data"
);
assert_eq!(
Change::new(&mallory_program_id, &mallory_program_id)
.owner(&alice_program_id)
.executable(true, true)
.data(vec![42], vec![0])
.verify(),
Err(InstructionError::ModifiedProgramId),
"mallory should not be able to change the account owner, if the account executable"
);
assert_eq!(
Change::new(&mallory_program_id, &mallory_program_id)
.owner(&alice_program_id)
.data(vec![42], vec![42])
.verify(),
Err(InstructionError::ModifiedProgramId),
"mallory should not be able to inject data into the alice program"
);
}
#[test]
fn test_verify_account_changes_executable() {
let owner = solana_sdk::pubkey::new_rand();
let mallory_program_id = solana_sdk::pubkey::new_rand();
let system_program_id = system_program::id();
assert_eq!(
Change::new(&owner, &system_program_id)
.executable(false, true)
.verify(),
Err(InstructionError::ExecutableModified),
"system program can't change executable if system doesn't own the account"
);
assert_eq!(
Change::new(&owner, &system_program_id)
.executable(true, true)
.data(vec![1], vec![2])
.verify(),
Err(InstructionError::ExecutableDataModified),
"system program can't change executable data if system doesn't own the account"
);
assert_eq!(
Change::new(&owner, &owner).executable(false, true).verify(),
Ok(()),
"owner should be able to change executable"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(false, true)
.read_only()
.verify(),
Err(InstructionError::ExecutableModified),
"owner can't modify executable of read-only accounts"
);
assert_eq!(
Change::new(&owner, &owner).executable(true, false).verify(),
Err(InstructionError::ExecutableModified),
"owner program can't reverse executable"
);
assert_eq!(
Change::new(&owner, &mallory_program_id)
.executable(false, true)
.verify(),
Err(InstructionError::ExecutableModified),
"malicious Mallory should not be able to change the account executable"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(false, true)
.data(vec![1], vec![2])
.verify(),
Ok(()),
"account data can change in the same instruction that sets the bit"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(true, true)
.data(vec![1], vec![2])
.verify(),
Err(InstructionError::ExecutableDataModified),
"owner should not be able to change an account's data once its marked executable"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(true, true)
.lamports(1, 2)
.verify(),
Err(InstructionError::ExecutableLamportChange),
"owner should not be able to add lamports once marked executable"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(true, true)
.lamports(1, 2)
.verify(),
Err(InstructionError::ExecutableLamportChange),
"owner should not be able to add lamports once marked executable"
);
assert_eq!(
Change::new(&owner, &owner)
.executable(true, true)
.lamports(2, 1)
.verify(),
Err(InstructionError::ExecutableLamportChange),
"owner should not be able to subtract lamports once marked executable"
);
let data = vec![1; 100];
let min_lamports = Rent::default().minimum_balance(data.len());
assert_eq!(
Change::new(&owner, &owner)
.executable(false, true)
.lamports(0, min_lamports)
.data(data.clone(), data.clone())
.verify(),
Ok(()),
);
assert_eq!(
Change::new(&owner, &owner)
.executable(false, true)
.lamports(0, min_lamports - 1)
.data(data.clone(), data)
.verify(),
Err(InstructionError::ExecutableAccountNotRentExempt),
"owner should not be able to change an account's data once its marked executable"
);
}
#[test]
fn test_verify_account_changes_data_len() {
let alice_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&system_program::id(), &system_program::id())
.data(vec![0], vec![0, 0])
.verify(),
Ok(()),
"system program should be able to change the data len"
);
assert_eq!(
Change::new(&alice_program_id, &system_program::id())
.data(vec![0], vec![0,0])
.verify(),
Err(InstructionError::AccountDataSizeChanged),
"system program should not be able to change the data length of accounts it does not own"
);
}
#[test]
fn test_verify_account_changes_data() {
let alice_program_id = solana_sdk::pubkey::new_rand();
let mallory_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.data(vec![0], vec![42])
.verify(),
Ok(()),
"alice program should be able to change the data"
);
assert_eq!(
Change::new(&mallory_program_id, &alice_program_id)
.data(vec![0], vec![42])
.verify(),
Err(InstructionError::ExternalAccountDataModified),
"non-owner mallory should not be able to change the account data"
);
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.data(vec![0], vec![42])
.read_only()
.verify(),
Err(InstructionError::ReadonlyDataModified),
"alice isn't allowed to touch a CO account"
);
}
#[test]
fn test_verify_account_changes_rent_epoch() {
let alice_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&alice_program_id, &system_program::id()).verify(),
Ok(()),
"nothing changed!"
);
assert_eq!(
Change::new(&alice_program_id, &system_program::id())
.rent_epoch(0, 1)
.verify(),
Err(InstructionError::RentEpochModified),
"no one touches rent_epoch"
);
}
#[test]
fn test_verify_account_changes_deduct_lamports_and_reassign_account() {
let alice_program_id = solana_sdk::pubkey::new_rand();
let bob_program_id = solana_sdk::pubkey::new_rand();
// positive test of this capability
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.owner(&bob_program_id)
.lamports(42, 1)
.data(vec![42], vec![0])
.verify(),
Ok(()),
"alice should be able to deduct lamports and give the account to bob if the data is zeroed",
);
}
#[test]
fn test_verify_account_changes_lamports() {
let alice_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&alice_program_id, &system_program::id())
.lamports(42, 0)
.read_only()
.verify(),
Err(InstructionError::ExternalAccountLamportSpend),
"debit should fail, even if system program"
);
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.lamports(42, 0)
.read_only()
.verify(),
Err(InstructionError::ReadonlyLamportChange),
"debit should fail, even if owning program"
);
assert_eq!(
Change::new(&alice_program_id, &system_program::id())
.lamports(42, 0)
.owner(&system_program::id())
.verify(),
Err(InstructionError::ModifiedProgramId),
"system program can't debit the account unless it was the pre.owner"
);
assert_eq!(
Change::new(&system_program::id(), &system_program::id())
.lamports(42, 0)
.owner(&alice_program_id)
.verify(),
Ok(()),
"system can spend (and change owner)"
);
}
#[test]
fn test_verify_account_changes_data_size_changed() {
let alice_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&alice_program_id, &system_program::id())
.data(vec![0], vec![0, 0])
.verify(),
Err(InstructionError::AccountDataSizeChanged),
"system program should not be able to change another program's account data size"
);
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.data(vec![0], vec![0, 0])
.verify(),
Err(InstructionError::AccountDataSizeChanged),
"non-system programs cannot change their data size"
);
assert_eq!(
Change::new(&system_program::id(), &system_program::id())
.data(vec![0], vec![0, 0])
.verify(),
Ok(()),
"system program should be able to change account data size"
);
}
#[test]
fn test_verify_account_changes_owner_executable() {
let alice_program_id = solana_sdk::pubkey::new_rand();
let bob_program_id = solana_sdk::pubkey::new_rand();
assert_eq!(
Change::new(&alice_program_id, &alice_program_id)
.owner(&bob_program_id)
.executable(false, true)
.verify(),
Err(InstructionError::ExecutableModified),
"Program should not be able to change owner and executable at the same time"
);
}
#[test]
fn test_process_message_readonly_handling() {
#[derive(Serialize, Deserialize)]
enum MockSystemInstruction {
Correct,
AttemptCredit { lamports: u64 },
AttemptDataChange { data: u8 },
}
fn mock_system_process_instruction(
_program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
data: &[u8],
_invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockSystemInstruction::Correct => Ok(()),
MockSystemInstruction::AttemptCredit { lamports } => {
keyed_accounts[0].account.borrow_mut().lamports -= lamports;
keyed_accounts[1].account.borrow_mut().lamports += lamports;
Ok(())
}
// Change data in a read-only account
MockSystemInstruction::AttemptDataChange { data } => {
keyed_accounts[1].account.borrow_mut().set_data(vec![data]);
Ok(())
}
}
} else {
Err(InstructionError::InvalidInstructionData)
}
}
let mock_system_program_id = Pubkey::new(&[2u8; 32]);
let rent_collector = RentCollector::default();
let mut message_processor = MessageProcessor::default();
message_processor.add_program(mock_system_program_id, mock_system_process_instruction);
let mut accounts: Vec<Rc<RefCell<AccountSharedData>>> = Vec::new();
let account = AccountSharedData::new_ref(100, 1, &mock_system_program_id);
accounts.push(account);
let account = AccountSharedData::new_ref(0, 1, &mock_system_program_id);
accounts.push(account);
let mut loaders: Vec<Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>> = Vec::new();
let account = Rc::new(RefCell::new(create_loadable_account_for_test(
"mock_system_program",
)));
loaders.push(vec![(mock_system_program_id, account)]);
let executors = Rc::new(RefCell::new(Executors::default()));
let ancestors = Ancestors::default();
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, true),
AccountMeta::new_readonly(to_pubkey, false),
];
let message = Message::new(
&[Instruction::new_with_bincode(
mock_system_program_id,
&MockSystemInstruction::Correct,
account_metas.clone(),
)],
Some(&from_pubkey),
);
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors.clone(),
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(result, Ok(()));
assert_eq!(accounts[0].borrow().lamports, 100);
assert_eq!(accounts[1].borrow().lamports, 0);
let message = Message::new(
&[Instruction::new_with_bincode(
mock_system_program_id,
&MockSystemInstruction::AttemptCredit { lamports: 50 },
account_metas.clone(),
)],
Some(&from_pubkey),
);
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors.clone(),
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(
result,
Err(TransactionError::InstructionError(
0,
InstructionError::ReadonlyLamportChange
))
);
let message = Message::new(
&[Instruction::new_with_bincode(
mock_system_program_id,
&MockSystemInstruction::AttemptDataChange { data: 50 },
account_metas,
)],
Some(&from_pubkey),
);
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors,
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(
result,
Err(TransactionError::InstructionError(
0,
InstructionError::ReadonlyDataModified
))
);
}
#[test]
fn test_process_message_duplicate_accounts() {
#[derive(Serialize, Deserialize)]
enum MockSystemInstruction {
BorrowFail,
MultiBorrowMut,
DoWork { lamports: u64, data: u8 },
}
fn mock_system_process_instruction(
_program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
data: &[u8],
_invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockSystemInstruction::BorrowFail => {
let from_account = keyed_accounts[0].try_account_ref_mut()?;
let dup_account = keyed_accounts[2].try_account_ref_mut()?;
if from_account.lamports != dup_account.lamports {
return Err(InstructionError::InvalidArgument);
}
Ok(())
}
MockSystemInstruction::MultiBorrowMut => {
let from_lamports = {
let from_account = keyed_accounts[0].try_account_ref_mut()?;
from_account.lamports
};
let dup_lamports = {
let dup_account = keyed_accounts[2].try_account_ref_mut()?;
dup_account.lamports
};
if from_lamports != dup_lamports {
return Err(InstructionError::InvalidArgument);
}
Ok(())
}
MockSystemInstruction::DoWork { lamports, data } => {
{
let mut to_account = keyed_accounts[1].try_account_ref_mut()?;
let mut dup_account = keyed_accounts[2].try_account_ref_mut()?;
dup_account.lamports -= lamports;
to_account.lamports += lamports;
dup_account.set_data(vec![data]);
}
keyed_accounts[0].try_account_ref_mut()?.lamports -= lamports;
keyed_accounts[1].try_account_ref_mut()?.lamports += lamports;
Ok(())
}
}
} else {
Err(InstructionError::InvalidInstructionData)
}
}
let mock_program_id = Pubkey::new(&[2u8; 32]);
let rent_collector = RentCollector::default();
let mut message_processor = MessageProcessor::default();
message_processor.add_program(mock_program_id, mock_system_process_instruction);
let mut accounts: Vec<Rc<RefCell<AccountSharedData>>> = Vec::new();
let account = AccountSharedData::new_ref(100, 1, &mock_program_id);
accounts.push(account);
let account = AccountSharedData::new_ref(0, 1, &mock_program_id);
accounts.push(account);
let mut loaders: Vec<Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>> = Vec::new();
let account = Rc::new(RefCell::new(create_loadable_account_for_test(
"mock_system_program",
)));
loaders.push(vec![(mock_program_id, account)]);
let executors = Rc::new(RefCell::new(Executors::default()));
let ancestors = Ancestors::default();
let from_pubkey = solana_sdk::pubkey::new_rand();
let to_pubkey = solana_sdk::pubkey::new_rand();
let dup_pubkey = from_pubkey;
let account_metas = vec![
AccountMeta::new(from_pubkey, true),
AccountMeta::new(to_pubkey, false),
AccountMeta::new(dup_pubkey, false),
];
// Try to borrow mut the same account
let message = Message::new(
&[Instruction::new_with_bincode(
mock_program_id,
&MockSystemInstruction::BorrowFail,
account_metas.clone(),
)],
Some(&from_pubkey),
);
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors.clone(),
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(
result,
Err(TransactionError::InstructionError(
0,
InstructionError::AccountBorrowFailed
))
);
// Try to borrow mut the same account in a safe way
let message = Message::new(
&[Instruction::new_with_bincode(
mock_program_id,
&MockSystemInstruction::MultiBorrowMut,
account_metas.clone(),
)],
Some(&from_pubkey),
);
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors.clone(),
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(result, Ok(()));
// Do work on the same account but at different location in keyed_accounts[]
let message = Message::new(
&[Instruction::new_with_bincode(
mock_program_id,
&MockSystemInstruction::DoWork {
lamports: 10,
data: 42,
},
account_metas,
)],
Some(&from_pubkey),
);
let ancestors = Ancestors::default();
let mvg = MockVoterGroup::new();
let result = message_processor.process_message(
&message,
&loaders,
&accounts,
&[],
&rent_collector,
None,
executors,
None,
Arc::new(FeatureSet::all_enabled()),
BpfComputeBudget::new(),
&mut ExecuteDetailsTimings::default(),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
assert_eq!(result, Ok(()));
assert_eq!(accounts[0].borrow().lamports, 80);
assert_eq!(accounts[1].borrow().lamports, 20);
assert_eq!(accounts[0].borrow().data(), &vec![42]);
}
#[test]
fn test_process_cross_program() {
#[derive(Debug, Serialize, Deserialize)]
enum MockInstruction {
NoopSuccess,
NoopFail,
ModifyOwned,
ModifyNotOwned,
}
fn mock_process_instruction(
program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
data: &[u8],
_invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
assert_eq!(*program_id, keyed_accounts[0].owner()?);
assert_ne!(
keyed_accounts[1].owner()?,
*keyed_accounts[0].unsigned_key()
);
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockInstruction::NoopSuccess => (),
MockInstruction::NoopFail => return Err(InstructionError::GenericError),
MockInstruction::ModifyOwned => {
keyed_accounts[0].try_account_ref_mut()?.data_as_mut_slice()[0] = 1
}
MockInstruction::ModifyNotOwned => {
keyed_accounts[1].try_account_ref_mut()?.data_as_mut_slice()[0] = 1
}
}
} else {
return Err(InstructionError::InvalidInstructionData);
}
Ok(())
}
let caller_program_id = solana_sdk::pubkey::new_rand();
let callee_program_id = solana_sdk::pubkey::new_rand();
let mut program_account = AccountSharedData::new(1, 0, &native_loader::id());
program_account.executable = true;
let executable_preaccount = PreAccount::new(&callee_program_id, &program_account);
let executable_accounts = vec![(
callee_program_id,
Rc::new(RefCell::new(program_account.clone())),
)];
let owned_key = solana_sdk::pubkey::new_rand();
let owned_account = AccountSharedData::new(42, 1, &callee_program_id);
let owned_preaccount = PreAccount::new(&owned_key, &owned_account);
let not_owned_key = solana_sdk::pubkey::new_rand();
let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand());
let not_owned_preaccount = PreAccount::new(¬_owned_key, ¬_owned_account);
#[allow(unused_mut)]
let mut accounts = vec![
Rc::new(RefCell::new(owned_account)),
Rc::new(RefCell::new(not_owned_account)),
Rc::new(RefCell::new(program_account)),
];
let programs: Vec<(_, ProcessInstructionWithContext)> =
vec![(callee_program_id, mock_process_instruction)];
let ancestors = Ancestors::default();
let mvg = MockVoterGroup::new();
let mut invoke_context = ThisInvokeContext::new(
&caller_program_id,
Rent::default(),
vec![
owned_preaccount,
not_owned_preaccount,
executable_preaccount,
],
&[],
&[],
programs.as_slice(),
None,
BpfComputeBudget::default(),
Rc::new(RefCell::new(Executors::default())),
None,
Arc::new(FeatureSet::all_enabled()),
Arc::new(Accounts::default()),
&ancestors,
&mvg,
);
let metas = vec![
AccountMeta::new(owned_key, false),
AccountMeta::new(not_owned_key, false),
];
// not owned account modified by the caller (before the invoke)
accounts[0].borrow_mut().data_as_mut_slice()[0] = 1;
let instruction = Instruction::new_with_bincode(
callee_program_id,
&MockInstruction::NoopSuccess,
metas.clone(),
);
let demote_sysvar_write_locks = true;
let message = Message::new(&[instruction], None);
let caller_write_privileges = message
.account_keys
.iter()
.enumerate()
.map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks))
.collect::<Vec<bool>>();
assert_eq!(
MessageProcessor::process_cross_program_instruction(
&message,
&executable_accounts,
&accounts,
&caller_write_privileges,
&mut invoke_context,
),
Err(InstructionError::ExternalAccountDataModified)
);
accounts[0].borrow_mut().data_as_mut_slice()[0] = 0;
let cases = vec![
(MockInstruction::NoopSuccess, Ok(())),
(
MockInstruction::NoopFail,
Err(InstructionError::GenericError),
),
(MockInstruction::ModifyOwned, Ok(())),
(
MockInstruction::ModifyNotOwned,
Err(InstructionError::ExternalAccountDataModified),
),
];
for case in cases {
let instruction =
Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone());
let message = Message::new(&[instruction], None);
let caller_write_privileges = message
.account_keys
.iter()
.enumerate()
.map(|(i, _)| message.is_writable(i, demote_sysvar_write_locks))
.collect::<Vec<bool>>();
assert_eq!(
MessageProcessor::process_cross_program_instruction(
&message,
&executable_accounts,
&accounts,
&caller_write_privileges,
&mut invoke_context,
),
case.1
);
}
}
#[test]
fn test_debug() {
let mut message_processor = MessageProcessor::default();
#[allow(clippy::unnecessary_wraps)]
fn mock_process_instruction(
_program_id: &Pubkey,
_keyed_accounts: &[KeyedAccount],
_data: &[u8],
_invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn mock_ix_processor(
_pubkey: &Pubkey,
_ka: &[KeyedAccount],
_data: &[u8],
_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
Ok(())
}
let program_id = solana_sdk::pubkey::new_rand();
message_processor.add_program(program_id, mock_process_instruction);
message_processor.add_loader(program_id, mock_ix_processor);
assert!(!format!("{:?}", message_processor).is_empty());
}
}
| 36.649914 | 122 | 0.535894 |
e66148ac1197749d0b0a08e575bae1499179f056 | 570 | // A palindromic number reads the same both ways. The largest palindrome made from the product of
// two 2-digit numbers is 9009 = 91 × 99.
// Find the largest palindrome made from the product of two 3-digit numbers.
extern crate project_euler;
use project_euler::palindrome::is_palindrome;
fn main() {
    // Project Euler #4: largest palindrome that is a product of two 3-digit
    // numbers. 3-digit factors run from 100 through 999 *inclusive*; the
    // previous exclusive ranges (`100..999`) silently skipped 999, so use
    // inclusive ranges to cover every factor.
    let mut palindromes = Vec::new();
    for x in (100..=999).rev() {
        for y in (100..=999).rev() {
            let z = x * y;
            if is_palindrome(&z.to_string()) {
                palindromes.push(z);
            }
        }
    }
    // `max()` is `Some` because 10201 = 101 * 101 (among others) is always found.
    println!("{}", palindromes.iter().max().unwrap());
}
| 24.782609 | 97 | 0.638596 |
e5e18d9d49f7fc7974ead0c4a71462a705a86f38 | 1,727 | use serde::Deserialize;
use crate::constants::MONGO_DB_TABLE_COLLECTION_NAME;
use crate::db;
use crate::web::{cookie as get_cookie, Error, Request, Result};
/// Query-string parameters accepted by `set_balance`:
/// `?player=<id>&amount=<n>`.
#[derive(Debug, Deserialize)]
struct BalanceQuery {
    amount: u32, // new balance to store
    player: String, // id of the player whose balance is updated
}
pub async fn set_balance(request: Request) -> Result {
let cook = get_cookie(&request).ok_or(Error::from_str(404, "unauth"))?;
let player = request
.state()
.authority(cook.value())
.await
.and_then(|auth| auth.admin())
.ok_or(Error::from_str(404, ""))?;
let query = request.query::<BalanceQuery>()?;
log::info!(
"admin {} updating player '{}' balance to {}",
player.id,
query.player,
query.amount
);
let players = request.state().players();
players
.update_one(
db::doc! { "id": query.player },
db::doc! { "$set": { "balance": query.amount } },
None,
)
.await
.map_err(|error| {
log::warn!("unable to update balance - {}", error);
Error::from_str(500, "bad-save")
})?;
Ok(format!("").into())
}
pub async fn drop_all(request: Request) -> Result {
let cook = get_cookie(&request).ok_or(Error::from_str(404, "unauth"))?;
let player = request
.state()
.authority(cook.value())
.await
.and_then(|auth| auth.admin())
.ok_or(Error::from_str(404, ""))?;
log::info!("admin {} dropping all tables", player.id);
let collection = request.state().tables();
collection
.drop(None)
.await
.map_err(|error| {
log::warn!("unable to create new table - {:?}", error);
Error::from_str(422, "failed")
})
.map(|_| {
log::info!("successfully dropped '{}'", MONGO_DB_TABLE_COLLECTION_NAME);
format!("").into()
})
}
| 23.986111 | 78 | 0.603358 |
effd176d741f17dfa79539330acd9dbc7684f9d5 | 6,914 | //! Networking code for Zebra.
//!
//! ## Network Protocol Design
//!
//! The Zcash network protocol is inherited from Bitcoin, which uses a
//! stateful network protocol in which messages can arrive in any
//! order (even before a handshake is complete!). The same Bitcoin message
//! may be a request or a response depending on context.
//!
//! ### Achieving Concurrency
//!
//! This crate translates the legacy Zcash network protocol
//! into a stateless, request-response oriented protocol defined by
//! the [`Request`] and [`Response`] enums. `zebra-network` completely
//! encapsulates all peer handling code behind a single
//! [`tower::Service`] representing "the network", which load-balances
//! outbound [`Request`]s over available peers.
//!
//! Unlike the underlying legacy network protocol, Zebra's [`PeerSet`]
//! [`tower::Service`] guarantees that each `Request` future will resolve to
//! the correct `Response`, rather than an unrelated `Response` message.
//!
//! Each peer connection is handled by a distinct [`Connection`] task.
//! The Zcash network protocol is bidirectional, so Zebra interprets incoming
//! Zcash messages as either:
//! - [`Response`]s to previously sent outbound [`Request`]s, or
//! - inbound [`Request`]s to an internal [`tower::Service`] representing "this node".
//!
//! All connection state is isolated to individual peers, so this
//! design is structurally immune to the recent `ping` attack.
//!
//! ### Connection Pool
//!
//! Because [`tower::Service`]s provide backpressure information, we
//! can dynamically manage the size of the connection pool according
//! to inbound and outbound demand. The inbound service can shed load
//! when it is not ready for requests, causing those peer connections
//! to close, and the outbound service can connect to additional peers
//! when it is overloaded.
//!
//! ## `zebra-network` Structure
//!
//! [`zebra-network::init`] is the main entry point for `zebra-network`.
//! It uses the following services, tasks, and endpoints:
//!
//! ### Low-Level Network Connections
//!
//! Inbound Zcash Listener Task:
//! * accepts inbound connections on the listener port
//! * initiates Zcash [`Handshake`]s, which creates [`Connection`] tasks for each inbound connection
//!
//! Outbound Zcash Connector Service:
//! * initiates outbound connections to peer addresses
//! * initiates Zcash [`Handshake`]s, which creates [`Connection`] tasks for each outbound connection
//!
//! Zebra uses direct TCP connections to share blocks and mempool transactions with other peers.
//!
//! The [`isolated`] APIs provide anonymised TCP and [Tor](https://crates.io/crates/arti)
//! connections to individual peers.
//! These isolated connections can be used to send user-generated transactions anonymously.
//!
//! ### Individual Peer Connections
//!
//! Each new peer connection spawns the following tasks:
//!
//! [`peer::Client`] Service:
//! * provides an interface for outbound requests to an individual peer
//! * accepts [`Request`]s assigned to this peer by the [`PeerSet`]
//! * sends each request to the peer as a Zcash [`Message`]
//! * waits for the inbound response [`Message`] from the peer, and returns it as a [`Response`]
//!
//! [`peer::Connection`] Service:
//! * manages connection state: awaiting a request, or handling an inbound or outbound response
//! * provides an interface for inbound requests from an individual peer
//! * accepts inbound Zcash [`Message`]s from this peer
//! * handles each message as a [`Request`] to the inbound service
//! * sends the [`Response`] to the peer as Zcash [`Message`]s
//! * drops peer connections if the inbound request queue is overloaded
//!
//! Since the Zcash network protocol is bidirectional,
//! inbound and outbound connections are handled using the same logic.
//!
//! ### Connection Pool
//!
//! [`PeerSet`] Network Service:
//! * provides an interface for other services and tasks running within this node
//! to make requests to remote peers ("the rest of the network")
//! * accepts [`Request`]s from the local node
//! * sends each request to a [`peer::Client`] using randomised load-balancing
//! * returns the [`Response`] from the [`peer::Client`]
//!
//! Inbound Network Service:
//! * provides an interface for remote peers to request data held by this node
//! * accepts inbound Zcash [`Request`]s from [`peer::Connection`]s
//! * handles each message as a [`Request`] to the local node
//! * sends the [`Response`] to the [`peer::Connection`]
//!
//! Note: the inbound service is implemented by the [`zebra-network::init`] caller.
//!
//! Peer Inventory Service:
//! * tracks gossiped `inv` advertisements for each peer
//! * tracks missing inventory for each peer
//! * used by the [`PeerSet`] to route block and transaction requests to peers that have the requested data
//!
//! ### Peer Discovery
//!
//! [`AddressBook`] Service:
//! * maintains a list of peer addresses and associated connection attempt metadata
//! * address book metadata is used to prioritise peer connection attempts
//!
//! Initial Seed Peer Task:
//! * initiates new outbound peer connections to seed peers, resolving them via DNS if required
//! * adds seed peer addresses to the [`AddressBook`]
//!
//! Peer Crawler Task:
//! * discovers new peer addresses by sending [`Addr`] requests to connected peers
//! * initiates new outbound peer connections in response to application demand
#![doc(html_favicon_url = "https://www.zfnd.org/images/zebra-favicon-128.png")]
#![doc(html_logo_url = "https://www.zfnd.org/images/zebra-icon.png")]
#![doc(html_root_url = "https://doc.zebra.zfnd.org/zebra_network")]
#[macro_use]
extern crate pin_project;
#[macro_use]
extern crate serde;
#[macro_use]
extern crate tracing;
#[macro_use]
extern crate bitflags;
/// Type alias to make working with tower traits easier.
///
/// This is a boxed, type-erased [`std::error::Error`] that is [`Send`] and
/// [`Sync`], so it can cross thread boundaries.
///
/// Note: the 'static lifetime bound means that the *type* cannot have any
/// non-'static lifetimes, (e.g., when a type contains a borrow and is
/// parameterized by 'a), *not* that the object itself has 'static lifetime.
pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
mod address_book;
mod address_book_updater;
mod config;
pub mod constants;
mod isolated;
mod meta_addr;
mod peer;
mod peer_set;
mod policies;
mod protocol;
#[cfg(feature = "tor")]
pub use crate::isolated::tor::connect_isolated_tor;
pub use crate::{
address_book::AddressBook,
config::Config,
isolated::{connect_isolated, connect_isolated_tcp_direct},
meta_addr::PeerAddrState,
peer::{HandshakeError, PeerError, SharedPeerError},
peer_set::init,
policies::RetryLimit,
protocol::internal::{Request, Response},
};
/// Types used in the definition of [`Request`] and [`Response`] messages.
pub mod types {
pub use crate::{meta_addr::MetaAddr, protocol::types::PeerServices};
}
| 41.154762 | 108 | 0.711889 |
d7bc4913f64a9b130bbf439c5d6d5dae5dea8006 | 645 | // timing clock structure
extern crate time;
/// A fixed-frequency timer: [`Clock::tick`] reports whether at least one
/// clock period has elapsed since the last reported tick.
pub struct Clock {
    curr_time: f64, // most recent time sample, in seconds
    last_time: f64, // time of the last reported tick, in seconds
    clock_period: f64, // seconds per tick (1 / frequency)
}
impl Clock {
    /// Build a clock that ticks at `freq` Hz, anchored to the current time.
    pub fn new(freq: f64) -> Clock {
        Clock {
            curr_time: 0.0,
            last_time: time::precise_time_s(),
            clock_period: freq.recip(),
        }
    }

    /// Sample the current time and report `true` when at least one full
    /// clock period has passed since the previous reported tick.
    pub fn tick(&mut self) -> bool {
        self.curr_time = time::precise_time_s();
        let elapsed = self.curr_time - self.last_time;
        if elapsed < self.clock_period {
            return false;
        }
        // NOTE(review): resetting to the sample time (rather than advancing
        // by exactly one period) lets scheduling latency accumulate as
        // drift — confirm this is the intended behaviour.
        self.last_time = self.curr_time;
        true
    }
}
| 19.545455 | 65 | 0.536434 |
9cbaeed8779d1ad3bb18bb746fee1c871c73b290 | 2,852 | use chrono::prelude::*;
use console::style;
use super::*;
use crate::github::GithubReleases;
// Fixed column widths (in characters) for the release table printed below.
static COL_WIDTH_VERSION: usize = 15;
static COL_WIDTH_DESCR: usize = 41;
static COL_WIDTH_CHANNEL: usize = 9;
static COL_WIDTH_PUBLISHED: usize = 18;
// hack needed to correctly fit styled string with unprintable chars to ascii-table
static FORMATTED_DELTA: usize = 8;
/// Fetch the release database from GitHub and render every downloadable
/// version as an ASCII table (version, description, channel, publish date).
///
/// Returns `Err` with a human-readable message when the release list cannot
/// be fetched.
pub fn command_versions() -> Result<(), String> {
    // Draw one horizontal rule of the table, e.g. "├────┼────┼────┼────┤",
    // using `left`, `mid` and `right` as the junction characters.
    // (The original repeated this print pattern four times inline.)
    fn rule(left: &str, mid: &str, right: &str) {
        let widths = [
            COL_WIDTH_VERSION,
            COL_WIDTH_DESCR,
            COL_WIDTH_CHANNEL,
            COL_WIDTH_PUBLISHED,
        ];
        print!("{}", left);
        for (i, width) in widths.iter().enumerate() {
            (0..*width).for_each(|_| print!("─"));
            print!("{}", if i + 1 == widths.len() { right } else { mid });
        }
        println!();
    }

    let release_db = GithubReleases::load_from_github()
        .map_err(|e| format!("Unable to fetch release list: {:?}", e))?;
    println_name_value(&format!(" {}versions available for download:", BULLET), "");
    rule("┌", "┬", "┐");
    println!(
        "│ {0:<1$}│ {2:<3$}│ {4:<5$}│ {6:<7$}│",
        "VERSION",
        COL_WIDTH_VERSION - 1,
        "DESCRIPTION",
        COL_WIDTH_DESCR - 1,
        "CHANNEL",
        COL_WIDTH_CHANNEL - 1,
        "PUBLISHED",
        COL_WIDTH_PUBLISHED - 1
    );
    rule("├", "┼", "┤");
    for release in release_db.all_versions() {
        // Unnamed releases get an italic placeholder; the styled text carries
        // invisible ANSI escapes, so the column is widened by FORMATTED_DELTA
        // to keep the table aligned.
        let (description, delta) = match &release.description {
            Some(friendly_name) => (friendly_name.clone(), 0),
            None => (
                style("<unnamed release>").italic().to_string(),
                FORMATTED_DELTA,
            ),
        };
        // Convert the stored publish timestamp to the user's local timezone.
        let local_time: DateTime<Local> = DateTime::from(release.published_at);
        println!(
            "│ {version: <version_len$}│ {description: <description_len$}│ {channel: <channel_len$}│ {published: <published_len$}│",
            version = style(&release.semver.to_string()).bold().to_string(),
            version_len = COL_WIDTH_VERSION - 1 + FORMATTED_DELTA,
            description = description,
            description_len = COL_WIDTH_DESCR - 1 + delta,
            channel = release.channel,
            channel_len = COL_WIDTH_CHANNEL - 1,
            published = local_time.format("%F %R").to_string(),
            published_len = COL_WIDTH_PUBLISHED - 1
        );
    }
    rule("└", "┴", "┘");
    Ok(())
}
| 31.688889 | 132 | 0.569074 |
334f2812bc3e18df2ed2a29fb86d5cb3daf86e15 | 8,845 | use std::collections::HashSet;
use std::env;
use std::ffi::OsStr;
use std::io;
use std::path::PathBuf;
use clap::{App, Arg, ArgMatches};
use console::style;
use failure::Error;
use if_chain::if_chain;
use serde::Serialize;
use symbolic::common::{ByteView, DebugId};
use symbolic::proguard::ProguardMappingView;
use uuid::Version as UuidVersion;
use walkdir::WalkDir;
use crate::utils::args::validate_id;
use crate::utils::dif::{DifFile, DifType};
use crate::utils::progress::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use crate::utils::system::QuietExit;
// text files larger than 32 megabytes are not considered to be
// valid mapping files when scanning
const MAX_MAPPING_FILE: u64 = 32 * 1024 * 1024;

/// A debug information file found on disk, serialized for `--json` output.
#[derive(Serialize, Debug)]
struct DifMatch {
    /// The kind of debug info file (dsym, elf, proguard, breakpad).
    #[serde(rename = "type")]
    pub ty: DifType,
    /// The debug identifier extracted from the file.
    pub id: DebugId,
    /// Filesystem path where the file was found.
    pub path: PathBuf,
}
/// Register the CLI arguments for the `dif find` subcommand on `app`.
pub fn make_app<'a, 'b: 'a>(app: App<'a, 'b>) -> App<'a, 'b> {
    let types = Arg::with_name("types")
        .long("type")
        .short("t")
        .value_name("TYPE")
        .multiple(true)
        .number_of_values(1)
        .possible_values(&["dsym", "elf", "proguard", "breakpad"])
        .help(
            "Only consider debug information files of the given \
             type. By default all types are considered.",
        );
    let no_well_known = Arg::with_name("no_well_known")
        .long("no-well-known")
        .help("Do not look for debug symbols in well known locations.");
    let no_cwd = Arg::with_name("no_cwd")
        .long("no-cwd")
        .help("Do not look for debug symbols in the current working directory.");
    let paths = Arg::with_name("paths")
        .long("path")
        .short("p")
        .multiple(true)
        .number_of_values(1)
        .help("Add a path to search recursively for debug info files.");
    let json = Arg::with_name("json")
        .long("json")
        .help("Format outputs as JSON.");
    let ids = Arg::with_name("ids")
        .index(1)
        .value_name("ID")
        .help("The debug identifiers of the files to search for.")
        .validator(validate_id)
        .multiple(true)
        .number_of_values(1);

    // Arguments are added in the same order as before, so parsing behaviour
    // and `--help` output are unchanged.
    app.about("Locate debug information files for given debug identifiers.")
        .arg(types)
        .arg(no_well_known)
        .arg(no_cwd)
        .arg(paths)
        .arg(json)
        .arg(ids)
}
/// Guess which kind of debug info file a bare identifier most likely
/// belongs to, based on its appendix and the UUID version of its payload.
fn id_hint(id: &DebugId) -> &'static str {
    if id.appendix() > 0 {
        // a non-zero appendix is treated as a PDB-style identifier here
        "likely PDB"
    } else {
        match id.uuid().get_version() {
            Some(UuidVersion::Sha1) => "likely Proguard",
            Some(UuidVersion::Md5) => "likely dSYM",
            None => "likely ELF Debug",
            _ => "unknown",
        }
    }
}
/// Recursively scan `paths` for debug information files of the given `types`
/// whose debug identifiers appear in `ids`.
///
/// Matches are printed as plain text, or as JSON when `as_json` is set;
/// identifiers that were not found are listed on stderr in text mode.
/// Returns `Ok(true)` when every requested identifier was found.
// TODO: Reduce complexity of this function
#[allow(clippy::cyclomatic_complexity)]
fn find_ids(
    paths: &HashSet<PathBuf>,
    types: &HashSet<DifType>,
    ids: &HashSet<DebugId>,
    as_json: bool,
) -> Result<bool, Error> {
    let mut remaining = ids.clone();
    // Proguard mappings are only matched via UUID5 (SHA1-based) identifiers.
    let mut proguard_uuids: HashSet<_> = ids
        .iter()
        .map(|x| x.uuid())
        .filter(|&x| x.get_version() == Some(UuidVersion::Sha1))
        .collect();

    let iter = paths
        .iter()
        .flat_map(WalkDir::new)
        .filter_map(|e| e.ok())
        .filter(|e| e.file_type().is_file());

    let mut found_files = vec![];
    let pb = ProgressBar::new_spinner();
    pb.set_draw_target(ProgressDrawTarget::stdout());
    pb.set_style(
        ProgressStyle::default_spinner()
            .tick_chars("/|\\- ")
            .template(
                "{spinner} Looking for debug info files... {msg:.dim}\
                 \n debug info files found: {prefix:.yellow}",
            ),
    );

    for dirent in iter {
        // stop scanning as soon as every requested id has been found
        if remaining.is_empty() {
            break;
        }

        if let Some(p) = dirent.file_name().to_str() {
            pb.set_message(p);
        }
        pb.tick();
        pb.set_prefix(&found_files.len().to_string());

        let mut found = vec![];

        // specifically look for proguard files. We only look for UUID5s
        // and only if the file is a text file.
        if_chain! {
            if !proguard_uuids.is_empty();
            if types.contains(&DifType::Proguard);
            if dirent.path().extension() == Some(OsStr::new("txt"));
            if let Ok(md) = dirent.metadata();
            if md.len() < MAX_MAPPING_FILE;
            if let Ok(byteview) = ByteView::open(dirent.path());
            if let Ok(mapping) = ProguardMappingView::parse(byteview);
            if proguard_uuids.contains(&mapping.uuid());
            then {
                found.push((mapping.uuid().into(), DifType::Proguard));
            }
        }

        // look for dsyms
        if_chain! {
            if types.contains(&DifType::Dsym);
            // we regularly match on .class files but they will never be
            // dsyms, so we can quickly skip them here
            if dirent.path().extension() != Some(OsStr::new("class"));
            if let Ok(dif) = DifFile::open_path(dirent.path(), Some(DifType::Dsym));
            then {
                for id in dif.ids() {
                    if remaining.contains(&id) {
                        found.push((id, DifType::Dsym));
                    }
                }
            }
        }

        // look for elfs
        if_chain! {
            if types.contains(&DifType::Elf);
            if let Ok(dif) = DifFile::open_path(dirent.path(), Some(DifType::Elf));
            then {
                for id in dif.ids() {
                    if remaining.contains(&id) {
                        found.push((id, DifType::Elf));
                    }
                }
            }
        }

        // look for breakpad files
        if_chain! {
            if types.contains(&DifType::Breakpad);
            if dirent.path().extension() == Some(OsStr::new("sym"));
            if let Ok(dif) = DifFile::open_path(dirent.path(), Some(DifType::Breakpad));
            then {
                for id in dif.ids() {
                    if remaining.contains(&id) {
                        found.push((id, DifType::Breakpad));
                    }
                }
            }
        }

        // record matches and drop them from the outstanding sets
        for (id, ty) in found {
            let path = dirent.path().to_path_buf();
            found_files.push(DifMatch { ty, id, path });
            remaining.remove(&id);
            proguard_uuids.remove(&id.uuid());
        }
    }

    pb.finish_and_clear();

    if as_json {
        serde_json::to_writer_pretty(&mut io::stdout(), &found_files)?;
        println!();
    } else {
        for m in found_files {
            println!(
                "{} {} [{}]",
                style(m.id).dim(),
                m.path.display(),
                style(m.ty).yellow()
            );
        }
        if !remaining.is_empty() {
            eprintln!();
            eprintln!("missing debug information files:");
            for id in &remaining {
                eprintln!(" {} ({})", id, id_hint(id));
            }
        }
    }

    Ok(remaining.is_empty())
}
/// Execute the `dif find` command: resolve search paths, file types, and
/// requested debug identifiers from `matches`, then scan for them.
///
/// Exits with [`QuietExit`]`(1)` when at least one identifier was not found.
pub fn execute<'a>(matches: &ArgMatches<'a>) -> Result<(), Error> {
    let mut paths = HashSet::new();
    let mut types = HashSet::new();
    let mut ids = HashSet::new();

    // which types should we consider?
    if let Some(t) = matches.values_of("types") {
        for ty in t {
            // unwrap is safe: clap restricts values to `possible_values`
            types.insert(ty.parse().unwrap());
        }
    } else {
        // the `--type` help text promises "By default all types are
        // considered", so include every supported type (the previous
        // default omitted `Elf`, contradicting the documentation)
        types.insert(DifType::Dsym);
        types.insert(DifType::Elf);
        types.insert(DifType::Breakpad);
        types.insert(DifType::Proguard);
    }

    let with_well_known = !matches.is_present("no_well_known");
    let with_cwd = !matches.is_present("no_cwd");

    // start adding well known locations
    if_chain! {
        if with_well_known;
        if types.contains(&DifType::Dsym);
        if let Some(path) = dirs::home_dir().map(|x| x.join("Library/Developer/Xcode/DerivedData"));
        if path.is_dir();
        then {
            paths.insert(path);
        }
    }

    // current folder if wanted
    if_chain! {
        if with_cwd;
        if let Ok(path) = env::current_dir();
        then {
            paths.insert(path);
        }
    }

    // extra paths
    if let Some(p) = matches.values_of("paths") {
        for path in p {
            paths.insert(PathBuf::from(path));
        }
    }

    // which ids are we looking for?
    if let Some(i) = matches.values_of("ids") {
        for id in i {
            // unwrap is safe: clap validated each value with `validate_id`
            ids.insert(id.parse().unwrap());
        }
    } else {
        // nothing requested: trivially successful
        return Ok(());
    }

    if !find_ids(&paths, &types, &ids, matches.is_present("json"))? {
        return Err(QuietExit(1).into());
    }

    Ok(())
}
| 29.881757 | 100 | 0.506727 |
1ce882c92db8008095b497b7af6ce60f2cc355d7 | 58,959 | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub mod bots {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
    /// Fetch a single bot resource
    /// (`GET .../Microsoft.BotService/botServices/{resource_name}`).
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<Bot, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name
        );
        let mut req_builder = client.get(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Bot = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // any other status is decoded as a service `Error` payload
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the `get` operation; variants mirror the stages of
    /// the request pipeline.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Create (or replace) a bot resource
    /// (`PUT .../Microsoft.BotService/botServices/{resource_name}`).
    pub async fn create(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        parameters: &Bot,
        subscription_id: &str,
    ) -> std::result::Result<create::Response, create::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name
        );
        let mut req_builder = client.put(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create::ExecuteRequestError)?;
        // 200 and 201 are both success, distinguished by the Response enum
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: Bot = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: Bot = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                create::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for the `create` operation.
    pub mod create {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(Bot),
            Created201(Bot),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Patch an existing bot resource
    /// (`PATCH .../Microsoft.BotService/botServices/{resource_name}`).
    pub async fn update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        parameters: &Bot,
        subscription_id: &str,
    ) -> std::result::Result<update::Response, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name
        );
        let mut req_builder = client.patch(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        // 200 and 201 are both success, distinguished by the Response enum
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Bot = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Bot = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for the `update` operation.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(Bot),
            Created201(Bot),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Delete a bot resource
    /// (`DELETE .../Microsoft.BotService/botServices/{resource_name}`).
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name
        );
        let mut req_builder = client.delete(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        // success responses (200/204) carry no body
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for the `delete` operation.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// List all bot resources in a resource group
    /// (`GET .../resourceGroups/{resource_group_name}/.../botServices`).
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<BotResponseList, list_by_resource_group::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices",
            &operation_config.base_path, subscription_id, resource_group_name
        );
        let mut req_builder = client.get(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_resource_group::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: BotResponseList =
                    serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                list_by_resource_group::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the `list_by_resource_group` operation.
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// List all bot resources in a subscription
    /// (`GET /subscriptions/{subscription_id}/.../botServices`).
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<BotResponseList, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.BotService/botServices",
            &operation_config.base_path, subscription_id
        );
        let mut req_builder = client.get(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: BotResponseList = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the `list` operation.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Check whether a bot name is available
    /// (`.../Microsoft.BotService/botServices/checkNameAvailability`).
    ///
    /// NOTE(review): this sends a GET request carrying a JSON body; Azure
    /// check-name-availability endpoints are normally POST — confirm this
    /// matches the service's OpenAPI definition before changing anything.
    pub async fn get_check_name_availability(
        operation_config: &crate::OperationConfig,
        parameters: &CheckNameAvailabilityRequestBody,
    ) -> std::result::Result<CheckNameAvailabilityResponseBody, get_check_name_availability::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/providers/Microsoft.BotService/botServices/checkNameAvailability",
            &operation_config.base_path,
        );
        let mut req_builder = client.get(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_check_name_availability::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(get_check_name_availability::BuildRequestError)?;
        let rsp = client
            .execute(req)
            .await
            .context(get_check_name_availability::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_check_name_availability::ResponseBytesError)?;
                let rsp_value: CheckNameAvailabilityResponseBody =
                    serde_json::from_slice(&body).context(get_check_name_availability::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_check_name_availability::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(get_check_name_availability::DeserializeError { body })?;
                get_check_name_availability::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the `get_check_name_availability` operation.
    pub mod get_check_name_availability {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod channels {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
    /// Fetch a single bot channel
    /// (`GET .../botServices/{resource_name}/channels/{channel_name}`).
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        channel_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<BotChannel, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name, channel_name
        );
        let mut req_builder = client.get(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: BotChannel = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error types for the channel `get` operation.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// Create (or replace) a bot channel
    /// (`PUT .../botServices/{resource_name}/channels/{channel_name}`).
    pub async fn create(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        resource_name: &str,
        channel_name: &str,
        parameters: &BotChannel,
        subscription_id: &str,
    ) -> std::result::Result<create::Response, create::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels/{}",
            &operation_config.base_path, subscription_id, resource_group_name, resource_name, channel_name
        );
        let mut req_builder = client.put(uri_str);
        // attach a bearer token when a credential is configured
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create::ExecuteRequestError)?;
        // 200 and 201 are both success, distinguished by the Response enum
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: BotChannel = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: BotChannel = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: Error = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                create::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for the channel `create` operation.
    pub mod create {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(BotChannel),
            Created201(BotChannel),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: StatusCode, value: models::Error },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
channel_name: &str,
parameters: &BotChannel,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, channel_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: BotChannel = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: BotChannel = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(BotChannel),
Created201(BotChannel),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
channel_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, channel_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_with_keys(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
channel_name: &str,
subscription_id: &str,
) -> std::result::Result<BotChannel, list_with_keys::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels/{}/listChannelWithKeys",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, channel_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_with_keys::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_with_keys::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_with_keys::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_with_keys::ResponseBytesError)?;
let rsp_value: BotChannel = serde_json::from_slice(&body).context(list_with_keys::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_with_keys::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(list_with_keys::DeserializeError { body })?;
list_with_keys::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_with_keys {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
subscription_id: &str,
) -> std::result::Result<ChannelResponseList, list_by_resource_group::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/channels",
&operation_config.base_path, subscription_id, resource_group_name, resource_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_resource_group::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: ChannelResponseList =
serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
list_by_resource_group::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_resource_group {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
}
pub mod operations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists every operation exposed by the `Microsoft.BotService` resource
    /// provider.
    ///
    /// Unlike the other operations in this file there is no modelled default
    /// error schema for this endpoint, so any non-200 status is surfaced as
    /// `list::Error::UnexpectedResponse` carrying the raw body.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationEntityListResult, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!("{}/providers/Microsoft.BotService/operations", &operation_config.base_path);
        let mut req_builder = client.get(uri_str);
        // Bearer-token auth is attached only when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: OperationEntityListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                // Field-init shorthand: was `body: body` (clippy::redundant_field_names).
                list::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        /// Errors produced by [`super::list`].
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            /// Any non-200 status; no error schema is modelled for this endpoint.
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod bot_connection {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub async fn list_service_providers(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<ServiceProviderResponseList, list_service_providers::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.BotService/listAuthServiceProviders",
&operation_config.base_path, subscription_id
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_service_providers::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_service_providers::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_service_providers::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_service_providers::ResponseBytesError)?;
let rsp_value: ServiceProviderResponseList =
serde_json::from_slice(&body).context(list_service_providers::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_service_providers::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(list_service_providers::DeserializeError { body })?;
list_service_providers::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_service_providers {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_with_secrets(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
connection_name: &str,
subscription_id: &str,
) -> std::result::Result<ConnectionSetting, list_with_secrets::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/Connections/{}/listWithSecrets",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, connection_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_with_secrets::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_with_secrets::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_with_secrets::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_with_secrets::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(list_with_secrets::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_with_secrets::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(list_with_secrets::DeserializeError { body })?;
list_with_secrets::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_with_secrets {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
connection_name: &str,
subscription_id: &str,
) -> std::result::Result<ConnectionSetting, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/Connections/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, connection_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
connection_name: &str,
parameters: &ConnectionSetting,
subscription_id: &str,
) -> std::result::Result<create::Response, create::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/Connections/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, connection_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create::BuildRequestError)?;
let rsp = client.execute(req).await.context(create::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
Ok(create::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
Ok(create::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(create::DeserializeError { body })?;
create::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod create {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(ConnectionSetting),
Created201(ConnectionSetting),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
connection_name: &str,
parameters: &ConnectionSetting,
subscription_id: &str,
) -> std::result::Result<update::Response, update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/Connections/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, connection_name
);
let mut req_builder = client.patch(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(update::BuildRequestError)?;
let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: ConnectionSetting = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
Ok(update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(update::DeserializeError { body })?;
update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200(ConnectionSetting),
Created201(ConnectionSetting),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
connection_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/Connections/{}",
&operation_config.base_path, subscription_id, resource_group_name, resource_name, connection_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(delete::Response::Ok200),
StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
pub async fn list_by_bot_service(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
resource_name: &str,
subscription_id: &str,
) -> std::result::Result<ConnectionSettingResponseList, list_by_bot_service::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.BotService/botServices/{}/connections",
&operation_config.base_path, subscription_id, resource_group_name, resource_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list_by_bot_service::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(list_by_bot_service::BuildRequestError)?;
let rsp = client.execute(req).await.context(list_by_bot_service::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_bot_service::ResponseBytesError)?;
let rsp_value: ConnectionSettingResponseList =
serde_json::from_slice(&body).context(list_by_bot_service::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list_by_bot_service::ResponseBytesError)?;
let rsp_value: Error = serde_json::from_slice(&body).context(list_by_bot_service::DeserializeError { body })?;
list_by_bot_service::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
pub mod list_by_bot_service {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse { status_code: StatusCode, value: models::Error },
BuildRequestError { source: reqwest::Error },
ExecuteRequestError { source: reqwest::Error },
ResponseBytesError { source: reqwest::Error },
DeserializeError { source: serde_json::Error, body: bytes::Bytes },
GetTokenError { source: azure_core::errors::AzureError },
}
}
}
| 47.585956 | 136 | 0.597907 |
8f9f3983262372fdcd28e8782d6e6855c4ed933c | 8,585 | //! This module provides a simplified abstraction for working with
//! code blocks identified by their integer `NodeId`. In particular,
//! it captures a common set of attributes that all "function-like
//! things" (represented by `FnLike` instances) share. For example,
//! all `FnLike` instances have a type signature (be it explicit or
//! inferred). And all `FnLike` instances have a body, i.e., the code
//! that is run when the function-like thing it represents is invoked.
//!
//! With the above abstraction in place, one can treat the program
//! text as a collection of blocks of code (and most such blocks are
//! nested within a uniquely determined `FnLike`), and users can ask
//! for the `Code` associated with a particular NodeId.
use crate::hir as ast;
use crate::hir::map;
use crate::hir::{Expr, FnDecl, Node};
use crate::hir::intravisit::FnKind;
use syntax::ast::{Attribute, Ident};
use syntax_pos::Span;
/// A `FnLikeNode` is a HIR `Node` that behaves like a fn: it has a decl
/// and a body (as well as an id, a span, and so on).
///
/// Concretely, it is one of:
///
/// - a function item,
/// - a closure expression (i.e., an `ExprKind::Closure`), or
/// - the provided (default) implementation of a trait method.
///
/// Construct one via the `Code::from_node` function.
#[derive(Copy, Clone, Debug)]
pub struct FnLikeNode<'a> {
    node: Node<'a>,
}
/// Internal helper trait: answers whether an AST object corresponds to
/// some `FnLikeNode`.
trait MaybeFnLike {
    fn is_fn_like(&self) -> bool;
}
/// Free `fn` items are fn-like.
impl MaybeFnLike for ast::Item {
    fn is_fn_like(&self) -> bool {
        if let ast::ItemKind::Fn(..) = self.kind { true } else { false }
    }
}
/// Methods in `impl` blocks are fn-like.
impl MaybeFnLike for ast::ImplItem {
    fn is_fn_like(&self) -> bool {
        if let ast::ImplItemKind::Method(..) = self.kind { true } else { false }
    }
}
/// Trait methods are fn-like only when they carry a provided (default) body.
impl MaybeFnLike for ast::TraitItem {
    fn is_fn_like(&self) -> bool {
        if let ast::TraitItemKind::Method(_, ast::TraitMethod::Provided(_)) = self.kind {
            true
        } else {
            false
        }
    }
}
/// Closure expressions are fn-like.
impl MaybeFnLike for ast::Expr {
    fn is_fn_like(&self) -> bool {
        if let ast::ExprKind::Closure(..) = self.kind { true } else { false }
    }
}
/// Either an `FnLikeNode` or an `Expr`: the two constructs that
/// correspond to "code" (as in, something from which a control-flow
/// graph can be constructed).
#[derive(Copy, Clone)]
pub enum Code<'hir> {
    FnLike(FnLikeNode<'hir>),
    Expr(&'hir Expr),
}
impl<'a> Code<'a> {
    /// The `HirId` of the underlying code: the fn-like node's own id, or
    /// the wrapped expression's id.
    pub fn id(&self) -> ast::HirId {
        match *self {
            Code::FnLike(fn_like) => fn_like.id(),
            Code::Expr(expr) => expr.hir_id,
        }
    }
    /// Attempts to construct a `Code` from presumed `FnLike` or `Expr` node input.
    pub fn from_node(map: &map::Map<'a>, id: ast::HirId) -> Option<Code<'a>> {
        match map.get(id) {
            // A bare block is not "code" on its own; climb to its parent,
            // which is hopefully an expression node.
            map::Node::Block(_) => Code::from_node(map, map.get_parent_node(id)),
            map::Node::Expr(expr) => Some(Code::Expr(expr)),
            other => FnLikeNode::from_node(other).map(Code::FnLike),
        }
    }
}
/// These are all the components one can extract from a fn item for
/// use when implementing FnLikeNode operations.
struct ItemFnParts<'a> {
    ident: Ident,                 // function name
    decl: &'a ast::FnDecl,        // argument/return declaration
    header: ast::FnHeader,        // const/async/unsafe/ABI qualifiers
    vis: &'a ast::Visibility,     // `pub`, `pub(crate)`, etc.
    generics: &'a ast::Generics,  // generic parameters and where-clauses
    body: ast::BodyId,            // id of the function body
    id: ast::HirId,               // id of the item itself
    span: Span,                   // source span of the item
    attrs: &'a [Attribute],       // attributes attached to the item
}
/// These are all the components one can extract from a closure expr
/// for use when implementing FnLikeNode operations.
struct ClosureParts<'a> {
    decl: &'a FnDecl,        // argument/return declaration
    body: ast::BodyId,       // id of the closure body
    id: ast::HirId,          // id of the closure expression
    span: Span,              // source span of the closure expression
    attrs: &'a [Attribute],  // attributes attached to the expression
}
impl<'a> ClosureParts<'a> {
    /// Bundles the pieces of a closure expression into a `ClosureParts`.
    fn new(decl: &'a FnDecl, body: ast::BodyId, id: ast::HirId, span: Span, attrs: &'a [Attribute]) -> Self {
        ClosureParts { decl, body, id, span, attrs }
    }
}
impl<'a> FnLikeNode<'a> {
    /// Attempts to construct a FnLikeNode from presumed FnLike node input.
    ///
    /// Returns `None` when the node is not one of the fn-like forms
    /// recognized by the `MaybeFnLike` impls above.
    pub fn from_node(node: Node<'_>) -> Option<FnLikeNode<'_>> {
        let fn_like = match node {
            map::Node::Item(item) => item.is_fn_like(),
            map::Node::TraitItem(tm) => tm.is_fn_like(),
            map::Node::ImplItem(it) => it.is_fn_like(),
            map::Node::Expr(e) => e.is_fn_like(),
            _ => false
        };
        fn_like.then_some(FnLikeNode { node })
    }
    /// The id of the fn-like thing's body.
    pub fn body(self) -> ast::BodyId {
        self.handle(|i: ItemFnParts<'a>| i.body,
                    |_, _, _: &'a ast::FnSig, _, body: ast::BodyId, _, _| body,
                    |c: ClosureParts<'a>| c.body)
    }
    /// The argument/return declaration of the fn-like thing.
    pub fn decl(self) -> &'a FnDecl {
        self.handle(|i: ItemFnParts<'a>| &*i.decl,
                    |_, _, sig: &'a ast::FnSig, _, _, _, _| &sig.decl,
                    |c: ClosureParts<'a>| c.decl)
    }
    /// The source span of the fn-like thing.
    pub fn span(self) -> Span {
        self.handle(|i: ItemFnParts<'_>| i.span,
                    |_, _, _: &'a ast::FnSig, _, _, span, _| span,
                    |c: ClosureParts<'_>| c.span)
    }
    /// The `HirId` of the fn-like thing itself.
    pub fn id(self) -> ast::HirId {
        self.handle(|i: ItemFnParts<'_>| i.id,
                    |id, _, _: &'a ast::FnSig, _, _, _, _| id,
                    |c: ClosureParts<'_>| c.id)
    }
    // Closures have no header, hence the `NotConst`/`NotAsync`/`Normal`
    // defaults in the three accessors below.
    pub fn constness(self) -> ast::Constness {
        self.kind().header().map_or(ast::Constness::NotConst, |header| header.constness)
    }
    pub fn asyncness(self) -> ast::IsAsync {
        self.kind().header().map_or(ast::IsAsync::NotAsync, |header| header.asyncness)
    }
    pub fn unsafety(self) -> ast::Unsafety {
        self.kind().header().map_or(ast::Unsafety::Normal, |header| header.unsafety)
    }
    /// Classifies the fn-like thing as an item fn, a method, or a closure,
    /// packaging the relevant pieces into a `FnKind`.
    pub fn kind(self) -> FnKind<'a> {
        let item = |p: ItemFnParts<'a>| -> FnKind<'a> {
            FnKind::ItemFn(p.ident, p.generics, p.header, p.vis, p.attrs)
        };
        let closure = |c: ClosureParts<'a>| {
            FnKind::Closure(c.attrs)
        };
        let method = |_, ident: Ident, sig: &'a ast::FnSig, vis, _, _, attrs| {
            FnKind::Method(ident, sig, vis, attrs)
        };
        self.handle(item, method, closure)
    }
    // Single dispatch point shared by all accessors above: exactly one of
    // the three callbacks is invoked, depending on which fn-like form the
    // wrapped node is. Panics (via `bug!`) if the node is not fn-like,
    // which `from_node` guarantees cannot happen for constructed values.
    fn handle<A, I, M, C>(self, item_fn: I, method: M, closure: C) -> A where
        I: FnOnce(ItemFnParts<'a>) -> A,
        M: FnOnce(ast::HirId,
                  Ident,
                  &'a ast::FnSig,
                  Option<&'a ast::Visibility>,
                  ast::BodyId,
                  Span,
                  &'a [Attribute])
                  -> A,
        C: FnOnce(ClosureParts<'a>) -> A,
    {
        match self.node {
            map::Node::Item(i) => match i.kind {
                ast::ItemKind::Fn(ref sig, ref generics, block) =>
                    item_fn(ItemFnParts {
                        id: i.hir_id,
                        ident: i.ident,
                        decl: &sig.decl,
                        body: block,
                        vis: &i.vis,
                        span: i.span,
                        attrs: &i.attrs,
                        header: sig.header,
                        generics,
                    }),
                _ => bug!("item FnLikeNode that is not fn-like"),
            },
            map::Node::TraitItem(ti) => match ti.kind {
                // Trait methods qualify only with a provided (default) body.
                ast::TraitItemKind::Method(ref sig, ast::TraitMethod::Provided(body)) => {
                    method(ti.hir_id, ti.ident, sig, None, body, ti.span, &ti.attrs)
                }
                _ => bug!("trait method FnLikeNode that is not fn-like"),
            },
            map::Node::ImplItem(ii) => {
                match ii.kind {
                    ast::ImplItemKind::Method(ref sig, body) => {
                        method(ii.hir_id, ii.ident, sig, Some(&ii.vis), body, ii.span, &ii.attrs)
                    }
                    _ => bug!("impl method FnLikeNode that is not fn-like")
                }
            },
            map::Node::Expr(e) => match e.kind {
                ast::ExprKind::Closure(_, ref decl, block, _fn_decl_span, _gen) =>
                    closure(ClosureParts::new(&decl, block, e.hir_id, e.span, &e.attrs)),
                _ => bug!("expr FnLikeNode that is not fn-like"),
            },
            _ => bug!("other FnLikeNode that is not fn-like"),
        }
    }
}
| 33.932806 | 100 | 0.524752 |
ffaa8b2811a0c8454ee3def3aacc98ef7c7f93ff | 9,478 | //! lint on enum variants that are prefixed or suffixed by the same characters
use crate::utils::{camel_case, in_macro};
use crate::utils::{span_help_and_lint, span_lint};
use rustc::lint::{EarlyContext, EarlyLintPass, Lint, LintArray, LintPass};
use rustc::{declare_tool_lint, lint_array};
use syntax::ast::*;
use syntax::source_map::Span;
use syntax::symbol::{InternedString, LocalInternedString};
/// **What it does:** Detects enumeration variants that are prefixed or suffixed
/// by the same characters.
///
/// **Why is this bad?** Enumeration variant names should specify their variant,
/// not repeat the enumeration name.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// enum Cake {
/// BlackForestCake,
/// HummingbirdCake,
/// BattenbergCake,
/// }
/// ```
declare_clippy_lint! {
pub ENUM_VARIANT_NAMES,
style,
"enums where all variants share a prefix/postfix"
}
/// **What it does:** Detects *public* enumeration variants that are prefixed
/// or suffixed by the same characters.
///
/// **Why is this bad?** Enumeration variant names should specify their variant,
/// not repeat the enumeration name.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// pub enum Cake {
///     BlackForestCake,
///     HummingbirdCake,
///     BattenbergCake,
/// }
/// ```
declare_clippy_lint! {
pub PUB_ENUM_VARIANT_NAMES,
pedantic,
"enums where all variants share a prefix/postfix"
}
/// **What it does:** Detects type names that are prefixed or suffixed by the
/// containing module's name.
///
/// **Why is this bad?** It requires the user to type the module name twice.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// mod cake {
/// struct BlackForestCake;
/// }
/// ```
declare_clippy_lint! {
pub MODULE_NAME_REPETITIONS,
pedantic,
"type names prefixed/postfixed with their containing module's name"
}
/// **What it does:** Checks for modules that have the same name as their
/// parent module
///
/// **Why is this bad?** A typical beginner mistake is to have `mod foo;` and
/// again `mod foo { .. }` in `foo.rs`. The expectation is that items inside
/// the inner `mod foo { .. }` are then available through `foo::x`, but they
/// are only available through `foo::foo::x`. If this is done on purpose, it
/// would be better to choose a more representative module name.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// // lib.rs
/// mod foo;
/// // foo.rs
/// mod foo {
/// ...
/// }
/// ```
declare_clippy_lint! {
pub MODULE_INCEPTION,
style,
"modules that have the same name as their parent module"
}
pub struct EnumVariantNames {
    /// Stack of modules currently being visited: (module name, CamelCase form).
    /// Pushed in `check_item`, popped in `check_item_post`.
    modules: Vec<(InternedString, String)>,
    /// Minimum number of variants before the shared prefix/postfix lint fires.
    threshold: u64,
}
impl EnumVariantNames {
    /// Creates the lint pass with the configured variant-count threshold.
    pub fn new(threshold: u64) -> Self {
        Self {
            modules: Vec::new(),
            threshold,
        }
    }
}
impl LintPass for EnumVariantNames {
    /// Registers every lint emitted by this pass.
    fn get_lints(&self) -> LintArray {
        lint_array!(
            ENUM_VARIANT_NAMES,
            PUB_ENUM_VARIANT_NAMES,
            MODULE_NAME_REPETITIONS,
            MODULE_INCEPTION
        )
    }
}
/// Returns the variant's identifier as an interned string slice.
fn var2str(var: &Variant) -> LocalInternedString {
    var.node.ident.as_str()
}
/// Returns the number of chars shared between the start of `pre` and the
/// start of `name`. The final char of `name` is excluded so that `name`
/// can never be considered a complete match.
fn partial_match(pre: &str, name: &str) -> usize {
    // Equivalent to dropping the last char of `name` before comparing.
    let usable = name.chars().count().saturating_sub(1);
    let mut matched = 0;
    for (lhs, rhs) in pre.chars().zip(name.chars().take(usable)) {
        if lhs != rhs {
            break;
        }
        matched += 1;
    }
    matched
}
/// Returns the number of chars shared between the end of `post` and the
/// end of `name`. The first char of `name` is excluded so that `name`
/// can never be considered a complete match.
fn partial_rmatch(post: &str, name: &str) -> usize {
    let mut truncated = name.chars();
    truncated.next(); // drop the leading char of `name`
    let mut matched = 0;
    for (lhs, rhs) in post.chars().rev().zip(truncated.rev()) {
        if lhs == rhs {
            matched += 1;
        } else {
            break;
        }
    }
    matched
}
/// Lints one enum definition:
/// 1. flags variants whose names start or end with the enum's own name, and
/// 2. reports a prefix/postfix shared by *all* variants (only when the enum
///    has at least `threshold` variants).
fn check_variant(
    cx: &EarlyContext<'_>,
    threshold: u64,
    def: &EnumDef,
    item_name: &str,
    item_name_chars: usize,
    span: Span,
    lint: &'static Lint,
) {
    if (def.variants.len() as u64) < threshold {
        return;
    }
    for var in &def.variants {
        let name = var2str(var);
        // A prefix match only counts when the next char starts a new camel
        // word (i.e. is not lowercase), so `Foobar` doesn't flag enum `Foo`.
        if partial_match(item_name, &name) == item_name_chars
            && name.chars().nth(item_name_chars).map_or(false, |c| !c.is_lowercase())
        {
            span_lint(cx, lint, var.span, "Variant name starts with the enum's name");
        }
        if partial_rmatch(item_name, &name) == item_name_chars {
            span_lint(cx, lint, var.span, "Variant name ends with the enum's name");
        }
    }
    // Seed the candidate prefix/postfix from the first variant, then shrink
    // them against every variant until only the truly shared parts remain.
    let first = var2str(&def.variants[0]);
    let mut pre = &first[..camel_case::until(&*first)];
    let mut post = &first[camel_case::from(&*first)..];
    for var in &def.variants {
        let name = var2str(var);
        let pre_match = partial_match(pre, &name);
        pre = &pre[..pre_match];
        let pre_camel = camel_case::until(pre);
        pre = &pre[..pre_camel];
        // Back off further while the variant continues with a lowercase char,
        // so the prefix always ends on a whole camel-case word boundary.
        while let Some((next, last)) = name[pre.len()..].chars().zip(pre.chars().rev()).next() {
            if next.is_lowercase() {
                let last = pre.len() - last.len_utf8();
                let last_camel = camel_case::until(&pre[..last]);
                pre = &pre[..last_camel];
            } else {
                break;
            }
        }
        let post_match = partial_rmatch(post, &name);
        let post_end = post.len() - post_match;
        post = &post[post_end..];
        let post_camel = camel_case::from(post);
        post = &post[post_camel..];
    }
    // If both survive, the shared prefix is reported (prefix wins over postfix).
    let (what, value) = match (pre.is_empty(), post.is_empty()) {
        (true, true) => return,
        (false, _) => ("pre", pre),
        (true, false) => ("post", post),
    };
    span_help_and_lint(
        cx,
        lint,
        span,
        &format!("All variants have the same {}fix: `{}`", what, value),
        &format!(
            "remove the {}fixes and use full paths to \
             the variants instead of glob imports",
            what
        ),
    );
}
/// Converts a `snake_case` identifier into `CamelCase`.
///
/// If `item_name` already contains any uppercase character it is assumed to
/// be camel case already and is returned unchanged.
fn to_camel_case(item_name: &str) -> String {
    // Preallocate: the output is never longer than the input.
    let mut s = String::with_capacity(item_name.len());
    let mut up = true;
    for c in item_name.chars() {
        if c.is_uppercase() {
            // we only turn snake case text into CamelCase
            return item_name.to_string();
        }
        if c == '_' {
            // Underscores are dropped; the next char starts a new word.
            up = true;
            continue;
        }
        if up {
            up = false;
            s.extend(c.to_uppercase());
        } else {
            s.push(c);
        }
    }
    s
}
impl EarlyLintPass for EnumVariantNames {
    /// Pops the module-stack entry pushed by `check_item`.
    fn check_item_post(&mut self, _cx: &EarlyContext<'_>, _item: &Item) {
        let last = self.modules.pop();
        // Every post-visit must pair with a push in `check_item`.
        assert!(last.is_some());
    }
    #[allow(clippy::similar_names)]
    fn check_item(&mut self, cx: &EarlyContext<'_>, item: &Item) {
        let item_name = item.ident.as_str();
        let item_name_chars = item_name.chars().count();
        let item_camel = to_camel_case(&item_name);
        if !in_macro(item.span) {
            if let Some(&(ref mod_name, ref mod_camel)) = self.modules.last() {
                // constants don't have surrounding modules
                if !mod_camel.is_empty() {
                    // `mod foo { mod foo { .. } }` — module named like its parent.
                    if *mod_name == item_name {
                        if let ItemKind::Mod(..) = item.node {
                            span_lint(
                                cx,
                                MODULE_INCEPTION,
                                item.span,
                                "module has the same name as its containing module",
                            );
                        }
                    }
                    // Only public items participate in the repetition lint.
                    if item.vis.node.is_pub() {
                        let matching = partial_match(mod_camel, &item_camel);
                        let rmatching = partial_rmatch(mod_camel, &item_camel);
                        let nchars = mod_camel.chars().count();
                        let is_word_beginning = |c: char| c == '_' || c.is_uppercase() || c.is_numeric();
                        // Prefix only counts if the char after the module name
                        // begins a new word (e.g. `FooBar` in module `foo`).
                        if matching == nchars {
                            match item_camel.chars().nth(nchars) {
                                Some(c) if is_word_beginning(c) => span_lint(
                                    cx,
                                    MODULE_NAME_REPETITIONS,
                                    item.span,
                                    "item name starts with its containing module's name",
                                ),
                                _ => (),
                            }
                        }
                        if rmatching == nchars {
                            span_lint(
                                cx,
                                MODULE_NAME_REPETITIONS,
                                item.span,
                                "item name ends with its containing module's name",
                            );
                        }
                    }
                }
            }
        }
        // Variant-name checks apply to every enum, macro-generated or not.
        if let ItemKind::Enum(ref def, _) = item.node {
            let lint = match item.vis.node {
                VisibilityKind::Public => PUB_ENUM_VARIANT_NAMES,
                _ => ENUM_VARIANT_NAMES,
            };
            check_variant(cx, self.threshold, def, &item_name, item_name_chars, item.span, lint);
        }
        // Track this item as the enclosing "module" for nested items.
        self.modules.push((item_name.as_interned_str(), item_camel));
    }
}
| 31.07541 | 105 | 0.530597 |
b9a6e3ae7f02e74b0e2c0277ebb5f26eee0ea7c2 | 36,576 | use crate::color::*;
use crate::vector2::*;
use crate::matrix3::*;
use crate::image::*;
use crate::font::*;
use crate::math::*;
use crate::framebuffer::*;
// Draw Mode Definition
/// Pixel-write function selected by `Rasterizer::set_draw_mode`; receives the
/// rasterizer, a byte index into the RGBA color buffer, and the source color.
pub type PSetOp = fn(&mut Rasterizer, usize, Color);
// Calls functions in other structures with each pixel drawn. Good for
/// External per-pixel callback signature: (x, y, color).
pub type OuterOp = fn(i32, i32, Color);
/// Controls how a rasterizer should draw incoming pixels.
#[derive(Debug, Clone, Copy)]
pub enum DrawMode {
    /// Count the pixel as drawn but write nothing.
    NoOp,
    /// Write the tinted pixel verbatim, ignoring alpha.
    NoAlpha,
    /// Draw only fully opaque (alpha == 255) pixels.
    Opaque,
    /// Alpha-blend the pixel with the buffer contents.
    Alpha,
    /// Additive blending with the buffer.
    Addition,
    // NOTE(review): no pixel op is wired up for this mode in `set_draw_mode` — confirm intent.
    Subtraction,
    /// Multiplicative blending with the buffer.
    Multiply,
    // NOTE(review): no pixel op is wired up for this mode in `set_draw_mode` — confirm intent.
    Divide,
    /// Alpha-blend the color-inverted pixel.
    InvertedAlpha,
    /// Draw the color-inverted pixel when fully opaque.
    InvertedOpaque,
    // Collect,
}
/// No-op draw mode: counts the pixel as drawn but writes nothing.
/// The index and color are intentionally unused (prefixed with `_` to
/// silence unused-variable warnings while keeping the `PSetOp` signature).
fn pset_noop(rasterizer: &mut Rasterizer, _idx: usize, _color: Color) {
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Write the tinted pixel verbatim, including its alpha byte; no blending.
fn pset_noalpha(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    let color = color * rasterizer.tint;
    rasterizer.framebuffer.color[idx + 0] = color.r; // R
    rasterizer.framebuffer.color[idx + 1] = color.g; // G
    rasterizer.framebuffer.color[idx + 2] = color.b; // B
    rasterizer.framebuffer.color[idx + 3] = color.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Draw pixels if they are fully opaque, otherwise ignore them.
fn pset_opaque(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    // Anything with partial transparency is skipped entirely.
    if color.a < 255 { return; }
    let color = color * rasterizer.tint;
    rasterizer.framebuffer.color[idx + 0] = color.r; // R
    rasterizer.framebuffer.color[idx + 1] = color.g; // G
    rasterizer.framebuffer.color[idx + 2] = color.b; // B
    rasterizer.framebuffer.color[idx + 3] = color.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Draw pixels and blend them with the background based on the alpha channel
fn pset_alpha(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    let fg = color * rasterizer.tint;
    // Current buffer contents; alpha read back as 255 (buffer treated as opaque).
    let bg = Color::new(
        rasterizer.framebuffer.color[idx + 0],
        rasterizer.framebuffer.color[idx + 1],
        rasterizer.framebuffer.color[idx + 2],
        255,
    );
    // Choose between the integer fast path and the float blend.
    let c = if rasterizer.use_fast_alpha_blend {
        Color::blend_fast(fg, bg, rasterizer.opacity)
    } else {
        Color::blend(fg, bg, rasterizer.opacity as f32 / 255.0)
    };
    rasterizer.framebuffer.color[idx + 0] = c.r; // R
    rasterizer.framebuffer.color[idx + 1] = c.g; // G
    rasterizer.framebuffer.color[idx + 2] = c.b; // B
    rasterizer.framebuffer.color[idx + 3] = c.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Add incoming and buffer pixels together and draw to screen
fn pset_addition(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    // Fully transparent pixels contribute nothing.
    if color.a <= 0 { return; }
    let fg = color * rasterizer.tint;
    let bg = Color::new(
        rasterizer.framebuffer.color[idx + 0],
        rasterizer.framebuffer.color[idx + 1],
        rasterizer.framebuffer.color[idx + 2],
        255,
    );
    // Blend first (respecting opacity), then add the background back on top.
    let c = if rasterizer.use_fast_alpha_blend {
        Color::blend_fast(fg, bg, rasterizer.opacity) + bg
    } else {
        Color::blend(fg, bg, rasterizer.opacity as f32 / 255.0) + bg
    };
    rasterizer.framebuffer.color[idx + 0] = c.r; // R
    rasterizer.framebuffer.color[idx + 1] = c.g; // G
    rasterizer.framebuffer.color[idx + 2] = c.b; // B
    rasterizer.framebuffer.color[idx + 3] = c.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Multiply incoming pixel with buffer pixel.
fn pset_multiply(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    if color.a <= 0 { return; }
    let fg = color * rasterizer.tint;
    let bg = Color::new(
        rasterizer.framebuffer.color[idx + 0],
        rasterizer.framebuffer.color[idx + 1],
        rasterizer.framebuffer.color[idx + 2],
        255,
    );
    // NOTE(review): the foreground is inverted before blending, then the
    // result is multiplied by the background — presumably so lower opacity
    // fades the multiply toward identity; confirm against `Color::blend_fast`.
    let c = if rasterizer.use_fast_alpha_blend {
        Color::blend_fast(fg.inverted(), bg, rasterizer.opacity) * bg
    } else {
        Color::blend(fg.inverted(), bg, rasterizer.opacity as f32 / 255.0) * bg
    };
    rasterizer.framebuffer.color[idx + 0] = c.r; // R
    rasterizer.framebuffer.color[idx + 1] = c.g; // G
    rasterizer.framebuffer.color[idx + 2] = c.b; // B
    rasterizer.framebuffer.color[idx + 3] = c.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Draw inverted copy of incoming pixel with alpha blending
fn pset_inverted_alpha(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    if color.a <= 0 { return; }
    let fg = color* rasterizer.tint;
    let bg = Color::new(
        rasterizer.framebuffer.color[idx + 0],
        rasterizer.framebuffer.color[idx + 1],
        rasterizer.framebuffer.color[idx + 2],
        255,
    );
    // Same as pset_alpha but the foreground color is inverted first.
    let c = if rasterizer.use_fast_alpha_blend {
        Color::blend_fast(fg.inverted(), bg, rasterizer.opacity)
    } else {
        Color::blend(fg.inverted(), bg, rasterizer.opacity as f32 / 255.0)
    };
    rasterizer.framebuffer.color[idx + 0] = c.r; // R
    rasterizer.framebuffer.color[idx + 1] = c.g; // G
    rasterizer.framebuffer.color[idx + 2] = c.b; // B
    rasterizer.framebuffer.color[idx + 3] = c.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/// Draw inverted copy of incoming pixel as opaque
fn pset_inverted_opaque(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
    // Only fully opaque pixels are drawn, matching pset_opaque.
    if color.a < 255 { return; }
    let c = (color * rasterizer.tint).inverted();
    rasterizer.framebuffer.color[idx + 0] = c.r; // R
    rasterizer.framebuffer.color[idx + 1] = c.g; // G
    rasterizer.framebuffer.color[idx + 2] = c.b; // B
    rasterizer.framebuffer.color[idx + 3] = c.a; // A
    rasterizer.drawn_pixels_since_cls += 1;
}
/* /// Collect drawn pixels into collected_pixels instead of drawing them to the buffer.
/// This is useful for more advanced graphical effects, for example using a series of ptriangle's to
/// build a polygonal area, then drawing a texture onto the pixels.
fn pset_collect(rasterizer: &mut Rasterizer, idx: usize, color: Color) {
let idx_unstrided = idx / 4;
let x = modi(idx_unstrided as i32, rasterizer.framebuffer.width as i32);
let y = idx_unstrided as i32 / rasterizer.framebuffer.width as i32;
rasterizer.collected_pixels.push((x, y, color));
} */
/// Drawing switchboard that draws directly into a framebuffer. Drawing options like Tint and Opacity must be manually changed by the user.
#[derive(Clone)]
pub struct Rasterizer {
    /// Active pixel-write function; swapped by `set_draw_mode`.
    pset_op: PSetOp,
    /// Destination RGBA buffer.
    pub framebuffer: FrameBuffer,
    /// Last requested draw mode (informational).
    pub draw_mode: DrawMode,
    /// Multiplied into every incoming color.
    pub tint: Color,
    /// Global opacity (0-255) applied by the blending modes.
    pub opacity: u8,
    /// When true, out-of-bounds coordinates wrap around the framebuffer.
    pub wrapping: bool,
    /// Selects the integer fast-path blend over the float blend.
    pub use_fast_alpha_blend: bool,
    //pub collected_pixels: Vec<(i32, i32, Color)>,
    /// Camera offset subtracted from draw coordinates in `pset`.
    pub camera_x: i32,
    pub camera_y: i32,
    /// Statistics since the last clear.
    pub drawn_pixels_since_cls: u64,
    pub time_since_cls: std::time::Duration,
}
impl Rasterizer {
    /// Makes a new Rasterizer to draw to a screen-sized buffer
    ///
    /// # Arguments
    /// * 'width' - Horizontal size of the framebuffer.
    /// * 'height' - Vertical size of the framebuffer.
    ///
    /// Defaults: Opaque draw mode, white tint, full opacity, no wrapping,
    /// fast alpha blending, camera at the origin.
    pub fn new(width: usize, height: usize) -> Rasterizer {
        //println!("Rasterizer: {} x {} x {}, Memory: {}B", width, height, 4, (width * height * 4));
        Rasterizer {
            pset_op: pset_opaque,
            framebuffer: FrameBuffer::new(width, height),
            draw_mode: DrawMode::Opaque,
            tint: Color::white(),
            opacity: 255,
            wrapping: false,
            use_fast_alpha_blend: true,
            //collected_pixels: Vec::new(),
            camera_x: 0,
            camera_y: 0,
            drawn_pixels_since_cls: 0,
            time_since_cls: std::time::Duration::new(0, 0),
        }
    }
    /// Clears the framebuffer and changes its width and height to new values.
    /// Note: the previous contents are discarded, not preserved or scaled.
    pub fn resize_framebuffer(&mut self, width: usize, height: usize) {
        self.framebuffer = FrameBuffer::new(width, height);
    }
/// Sets the rasterizers drawing mode for incoming pixels. Should be defined before every drawing operation.
/// # Arguments
/// * 'mode' - Which drawing function should the Rasterizer use.
pub fn set_draw_mode(&mut self, mode: DrawMode) {
match mode {
DrawMode::NoOp => { self.pset_op = pset_noop;}
DrawMode::NoAlpha => {self.pset_op = pset_noalpha;}
DrawMode::Opaque => {self.pset_op = pset_opaque;},
DrawMode::Alpha => {self.pset_op = pset_alpha;},
DrawMode::Addition => {self.pset_op = pset_addition;},
DrawMode::Multiply => {self.pset_op = pset_multiply;}
DrawMode::InvertedAlpha => {self.pset_op = pset_inverted_alpha;}
DrawMode::InvertedOpaque => {self.pset_op = pset_inverted_opaque;}
//DrawMode::Collect => {self.pset_op = pset_collect;}
_ => {},
}
}
    /// Blends `src` over `dst` using the rasterizer's global `opacity`,
    /// dispatching between the integer fast path and the float blend.
    fn blend_color(&mut self, src: Color, dst: Color) -> Color {
        if self.use_fast_alpha_blend {
            Color::blend_fast(src, dst, self.opacity)
        } else {
            Color::blend(src, dst, self.opacity as f32 / 255.0)
        }
    }
/// Clears the frame memory directly, leaving a black screen.
pub fn cls(&mut self) {
self.framebuffer.color = vec![0; self.framebuffer.width * self.framebuffer.height * 4];
self.drawn_pixels_since_cls = 0;
}
    /// Clears the screen to a color.
    /// # Arguments
    /// * 'color' - Color the screen should be cleared too.
    pub fn cls_color(&mut self, color: Color) {
        // Write the RGBA bytes into every 4-byte pixel chunk in place.
        self.framebuffer.color.chunks_exact_mut(4).for_each(|c| {
            c[0] = color.r;
            c[1] = color.g;
            c[2] = color.b;
            c[3] = color.a;
        });
        self.drawn_pixels_since_cls = 0;
    }
/* /// Clears the collected_pixels buffer. Does not resize to zero.
pub fn cls_collected(&mut self) {
self.collected_pixels.clear();
} */
    /// Copies every pixel of `rasterizer` onto self through the current draw mode.
    /// NOTE(review): `opacity` only short-circuits at 0 — it is not otherwise
    /// applied to the blend; confirm whether it should scale the source pixels.
    pub fn blend_rasterizer(&mut self, rasterizer: &mut Rasterizer, opacity: u8) {
        if opacity == 0 { return; }
        for y in 0..self.framebuffer.height {
            for x in 0..self.framebuffer.width {
                let (px, py) = (x as i32, y as i32);
                self.pset(px, py, rasterizer.pget(px, py));
            }
        }
    }
    /// Alpha-composites an equally-sized framebuffer onto this one.
    /// Silently does nothing if the dimensions differ or `opacity` is 0.
    pub fn blend_frame_alpha(&mut self, framebuffer: &FrameBuffer, opacity: u8) {
        if opacity == 0 { return; }
        if self.framebuffer.width != framebuffer.width || self.framebuffer.height != framebuffer.height { return; }
        self.framebuffer.color.chunks_exact_mut(4).enumerate().for_each(|(i, c)| {
            let dst_color = Color::new(c[0], c[1], c[2], c[3]);
            let src_color = Color::new(framebuffer.color[(i*4) + 0], framebuffer.color[(i*4) + 1], framebuffer.color[(i*4) + 2], framebuffer.color[(i*4) + 3]);
            // Transparent source pixels leave the destination untouched.
            if src_color.a == 0 { return; }
            let fc = Color::blend_fast(dst_color, src_color, 255 - opacity);
            c[0] = fc.r;
            c[1] = fc.g;
            c[2] = fc.b;
            c[3] = fc.a;
        });
    }
    /// Multiplicatively composites an equally-sized framebuffer onto this one.
    /// Silently does nothing if the dimensions differ.
    pub fn blend_frame_multiply(&mut self, framebuffer: &FrameBuffer) {
        if self.framebuffer.width != framebuffer.width || self.framebuffer.height != framebuffer.height { return; }
        self.framebuffer.color.chunks_exact_mut(4).enumerate().for_each(|(i, c)| {
            let dst_color = Color::new(c[0], c[1], c[2], c[3]);
            let src_color = Color::new(framebuffer.color[(i*4) + 0], framebuffer.color[(i*4) + 1], framebuffer.color[(i*4) + 2], framebuffer.color[(i*4) + 3]);
            // Transparent source pixels leave the destination untouched.
            if src_color.a == 0 { return; }
            let fc = Color::blend_fast(src_color, dst_color, 255) * dst_color;
            c[0] = fc.r;
            c[1] = fc.g;
            c[2] = fc.b;
            c[3] = fc.a;
        });
    }
    /// Draws a pixel to the color buffer, using the rasterizers set DrawMode. DrawMode defaults to Opaque.
    /// Coordinates are offset by the camera; out-of-bounds pixels are either
    /// discarded or wrapped depending on `self.wrapping`.
    pub fn pset(&mut self, x: i32, y: i32, color: Color) {
        let mut x = -self.camera_x as i32 + x;
        let mut y = -self.camera_y as i32 + y;
        // Byte index of the pixel's R component in the RGBA buffer.
        let mut idx: usize = ((y * (self.framebuffer.width as i32) + x) * 4) as usize;
        if !self.wrapping {
            let out_left: bool = x < 0;
            let out_right: bool = x > (self.framebuffer.width) as i32 - 1;
            let out_top: bool = y < 0;
            let out_bottom: bool = y > (self.framebuffer.height) as i32 - 1;
            let out_of_range: bool = idx > (self.framebuffer.width * self.framebuffer.height * 4) - 1;
            if out_of_range || out_left || out_right || out_top || out_bottom { return; }
        } else {
            // Wrap the coordinates into range and recompute the index.
            x = modi(x, self.framebuffer.width as i32);
            y = modi(y, self.framebuffer.height as i32);
            idx = ((y * (self.framebuffer.width as i32) + x) * 4) as usize;
            if idx > (self.framebuffer.width * self.framebuffer.height * 4) - 1 { return; }
        }
        // We have to put parenthesis around the fn() variables or else the compiler will think it's a method.
        (self.pset_op)(self, idx, color);
    }
    /// Gets a color from the color buffer.
    /// Returns black for any out-of-bounds coordinate. Note: unlike `pset`,
    /// this does not apply the camera offset or wrapping.
    pub fn pget(&mut self, x: i32, y: i32) -> Color {
        let idx: usize = ((y * (self.framebuffer.width as i32) + x) * 4) as usize;
        let out_left: bool = x < 0;
        let out_right: bool = x > (self.framebuffer.width) as i32 - 1;
        let out_top: bool = y < 0;
        let out_bottom: bool = y > (self.framebuffer.height) as i32 - 1;
        let out_of_range: bool = idx > (self.framebuffer.width * self.framebuffer.height * 4) - 1;
        if out_of_range || out_left || out_right || out_top || out_bottom { return Color::black(); }
        return Color::new(
            self.framebuffer.color[idx + 0],
            self.framebuffer.color[idx + 1],
            self.framebuffer.color[idx + 2],
            self.framebuffer.color[idx + 3]
        );
    }
    /// Draws a line using the Bresenham algorithm across two points
    /// Both endpoints are inclusive.
    pub fn pline(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, color: Color) {
        // Create local variables for moving start point
        let mut x0 = x0;
        let mut y0 = y0;
        // Get absolute x/y offset
        let dx = if x0 > x1 { x0 - x1 } else { x1 - x0 };
        let dy = if y0 > y1 { y0 - y1 } else { y1 - y0 };
        // Get slopes
        let sx = if x0 < x1 { 1 } else { -1 };
        let sy = if y0 < y1 { 1 } else { -1 };
        // Initialize error
        let mut err = if dx > dy { dx } else {-dy} / 2;
        let mut err2;
        loop {
            // Set pixel
            self.pset(x0, y0, color);
            // Check end condition
            if x0 == x1 && y0 == y1 { break };
            // Store old error
            err2 = 2 * err;
            // Adjust error and start position
            if err2 > -dx { err -= dy; x0 += sx; }
            if err2 < dy { err += dx; y0 += sy; }
        }
    }
    /// Draws a line using the Bresenham algorithm across two points. This second variation uses thickness
    /// A filled circle of radius `thickness` is stamped at every line pixel.
    pub fn pline2(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, thickness: i32, color: Color) {
        // Create local variables for moving start point
        let mut x0 = x0;
        let mut y0 = y0;
        // Get absolute x/y offset
        let dx = if x0 > x1 { x0 - x1 } else { x1 - x0 };
        let dy = if y0 > y1 { y0 - y1 } else { y1 - y0 };
        // Get slopes
        let sx = if x0 < x1 { 1 } else { -1 };
        let sy = if y0 < y1 { 1 } else { -1 };
        // Initialize error
        let mut err = if dx > dy { dx } else {-dy} / 2;
        let mut err2;
        loop {
            // Set pixel (stamped as a filled circle for thickness)
            self.pcircle(true, x0, y0, thickness, color);
            // Check end condition
            if x0 == x1 && y0 == y1 { break };
            // Store old error
            err2 = 2 * err;
            // Adjust error and start position
            if err2 > -dx { err -= dy; x0 += sx; }
            if err2 < dy { err += dx; y0 += sy; }
        }
    }
    /// Draws a rectangle onto the screen. Can either be filled or outlined.
    // NOTE(review): x1 is inclusive (w-1) but y1 is exclusive (h), so the
    // horizontal and vertical extents are treated asymmetrically — confirm intended.
    pub fn prectangle(&mut self, filled: bool, x: i32, y: i32, w: i32, h: i32, color: Color) {
        let x0 = x;
        let x1 = x + (w-1);
        let y0 = y;
        let y1 = y + h;
        if filled {
            // Fill with one horizontal scanline per row.
            for i in y0..y1 {
                self.pline(x0, i, x1, i, color);
            }
        } else {
            // Outline: top, left, bottom, right edges.
            self.pline(x0, y0, x1, y0, color);
            self.pline(x0, y0, x0, y1, color);
            self.pline(x0, y1, x1, y1, color);
            self.pline(x1, y0, x1, y1, color);
        }
    }
    /// Draws a circle onto the screen. Can either be filled or outlined.
    /// Uses the midpoint (Bresenham) circle algorithm, plotting all eight
    /// octants per step; filled circles draw horizontal spans instead of points.
    pub fn pcircle(&mut self, filled: bool, xc: i32, yc: i32, r: i32, color: Color) {
        let mut x: i32 = 0;
        let mut y: i32 = r;
        // Initial decision parameter of the midpoint algorithm.
        let mut d: i32 = 3 - 2 * r;
        if !filled {
            self.pset(xc+x, yc+y, color);
            self.pset(xc-x, yc+y, color);
            self.pset(xc+x, yc-y, color);
            self.pset(xc-x, yc-y, color);
            self.pset(xc+y, yc+x, color);
            self.pset(xc-y, yc+x, color);
            self.pset(xc+y, yc-x, color);
            self.pset(xc-y, yc-x, color);
        } else {
            self.pline(xc+x, yc+y, xc-x, yc+y, color);
            self.pline(xc+x, yc-y, xc-x, yc-y, color);
            self.pline(xc+y, yc+x, xc-y, yc+x, color);
            self.pline(xc+y, yc-x, xc-y, yc-x, color);
        }
        while y >= x
        {
            x += 1;
            // Move inward when the midpoint falls outside the circle.
            if d > 0 {
                y -= 1;
                d = d + 4 * (x - y) + 10;
            } else {
                d = d + 4 * x + 6;
            }
            if !filled {
                self.pset(xc+x, yc+y, color);
                self.pset(xc-x, yc+y, color);
                self.pset(xc+x, yc-y, color);
                self.pset(xc-x, yc-y, color);
                self.pset(xc+y, yc+x, color);
                self.pset(xc-y, yc+x, color);
                self.pset(xc+y, yc-x, color);
                self.pset(xc-y, yc-x, color);
            } else {
                self.pline(xc+x, yc+y, xc-x, yc+y, color);
                self.pline(xc+x, yc-y, xc-x, yc-y, color);
                self.pline(xc+y, yc+x, xc-y, yc+x, color);
                self.pline(xc+y, yc-x, xc-y, yc-x, color);
            }
        }
    }
    /// Draws an image directly to the screen.
    /// Fully transparent pixels are skipped regardless of draw mode.
    pub fn pimg(&mut self, image: &Image, x: i32, y: i32) {
        for ly in 0..image.height {
            for lx in 0..image.width {
                let pc = image.pget(lx as i32, ly as i32);
                if pc.a <= 0 { continue; }
                self.pset(x + lx as i32, y + ly as i32, pc);
            }
        }
    }
    /// Draws a section of an image directly to the screen.
    /// The (rx, ry, rw, rh) rectangle selects the source region; source
    /// coordinates are wrapped with `%` against the image dimensions.
    pub fn pimgrect(&mut self, image: &Image, x: i32, y: i32, rx: usize, ry: usize, rw: usize, rh: usize) {
        let range_x = rx + rw;
        let range_y = ry + rh;
        for ly in ry..range_y {
            for lx in rx..range_x {
                let mlx = lx % image.width;
                let mly = ly % image.height;
                let px: i32 = (x + mlx as i32) - rx as i32;
                let py: i32 = (y + mly as i32) - ry as i32;
                self.pset(px, py, image.pget(mlx as i32, mly as i32));
            }
        }
    }
    /// Draws a rotated and scaled image to the screen using matrix multiplication.
    /// Builds a compound transform (translate * rotate * scale * offset), computes
    /// the screen-space bounding box of the transformed image, then inverse-maps
    /// each screen pixel back into image space to sample its color.
    pub fn pimgmtx(&mut self, image: &Image, position: Vector2, rotation: f32, scale: Vector2, offset: Vector2) {
        let mtx_o = Matrix3::translated(offset);
        let mtx_r = Matrix3::rotated(rotation);
        let mtx_p = Matrix3::translated(position);
        let mtx_s = Matrix3::scaled(scale);
        let cmtx = mtx_p * mtx_r * mtx_s * mtx_o;
        // We have to get the rotated bounding box of the rotated sprite in order to draw it correctly without blank pixels
        let start_center: Vector2 = cmtx.forward(Vector2::zero());
        let (mut sx, mut sy, mut ex, mut ey) = (start_center.x, start_center.y, start_center.x, start_center.y);
        // Top-Left Corner
        let p1: Vector2 = cmtx.forward(Vector2::zero());
        sx = f32::min(sx, p1.x); sy = f32::min(sy, p1.y);
        ex = f32::max(ex, p1.x); ey = f32::max(ey, p1.y);
        // Bottom-Right Corner
        let p2: Vector2 = cmtx.forward(Vector2::new(image.width as f32, image.height as f32));
        sx = f32::min(sx, p2.x); sy = f32::min(sy, p2.y);
        ex = f32::max(ex, p2.x); ey = f32::max(ey, p2.y);
        // Bottom-Left Corner
        let p3: Vector2 = cmtx.forward(Vector2::new(0.0, image.height as f32));
        sx = f32::min(sx, p3.x); sy = f32::min(sy, p3.y);
        ex = f32::max(ex, p3.x); ey = f32::max(ey, p3.y);
        // Top-Right Corner
        let p4: Vector2 = cmtx.forward(Vector2::new(image.width as f32, 0.0));
        sx = f32::min(sx, p4.x); sy = f32::min(sy, p4.y);
        ex = f32::max(ex, p4.x); ey = f32::max(ey, p4.y);
        let mut rsx = sx as i32;
        let mut rsy = sy as i32;
        let mut rex = ex as i32;
        let mut rey = ey as i32;
        // Sprite isn't even in frame, don't draw anything
        if (rex < 0 || rsx > self.framebuffer.width as i32) && (rey < 0 || rsy > self.framebuffer.height as i32) { return; }
        // Okay but clamp the ranges in frame so we're not wasting time on stuff offscreen
        if rsx < 0 { rsx = 0;}
        if rsy < 0 { rsy = 0;}
        if rex > self.framebuffer.width as i32 { rex = self.framebuffer.width as i32; }
        if rey > self.framebuffer.height as i32 { rey = self.framebuffer.height as i32; }
        let cmtx_inv = cmtx.clone().inv();
        // We can finally draw!
        // Noticed some weird clipping on the right side of sprites, like the BB isn't big enough? Just gonna add some more pixels down and right just in case
        for ly in rsy..rey+8 {
            for lx in rsx..rex+8 {
                // We have to use the inverted compound matrix (cmtx_inv) in order to get the correct pixel data from the image.
                let ip: Vector2 = cmtx_inv.forward(Vector2::new(lx as f32, ly as f32));
                let color: Color = image.pget(ip.x as i32, ip.y as i32);
                if color.a <= 0 { continue; }
                self.pset(lx as i32, ly as i32, color);
            }
        }
    }
    /// Draws text directly to the screen using a provided font.
    /// '\n' advances a row; ' ' advances a glyph width; unknown characters
    /// (not present in `font.glyphidx`) are silently skipped.
    pub fn pprint(&mut self, font: &Font, text: String, x: i32, y: i32) {
        let mut jumpx: isize = 0;
        let mut jumpy: isize = 0;
        let chars: Vec<char> = text.chars().collect();
        for i in 0..chars.len() {
            if chars[i] == '\n' { jumpy += font.glyph_height as isize; jumpx = 0; continue; }
            if chars[i] == ' ' { jumpx += font.glyph_width as isize; continue; }
            for j in 0..font.glyphidx.len() {
                if font.glyphidx[j] == chars[i] {
                    // Locate the glyph cell inside the font atlas image.
                    let rectx: usize = (j * font.glyph_width) % (font.fontimg.width);
                    let recty: usize = ((j * font.glyph_width) / font.fontimg.width) * font.glyph_height;
                    let rectw: usize = font.glyph_width;
                    let recth: usize = font.glyph_height;
                    self.pimgrect(&font.fontimg, x + jumpx as i32, y + jumpy as i32, rectx, recty, rectw, recth);
                    jumpx += font.glyph_width as isize + font.glyph_spacing as isize;
                }
            }
        }
    }
    /// Draws a triangle directly to the screen.
    /// Filled mode collects the three edge lines, buckets their pixels per
    /// scanline row, and fills each row between its leftmost and rightmost hit.
    // NOTE(review): relies on `self.cline` (collect-line, not visible here) —
    // presumably returns the line's pixels without drawing; confirm.
    pub fn ptriangle(&mut self, filled: bool, v1x: i32, v1y: i32, v2x: i32, v2y: i32, v3x: i32, v3y: i32, color: Color) {
        if filled {
            // Collect pixels from lines without drawing to the screen
            let vl12 = self.cline(v1x, v1y, v2x, v2y);
            let vl13 = self.cline(v1x, v1y, v3x, v3y);
            let vl23 = self.cline(v2x, v2y, v3x, v3y);
            let mut all_pixels: Vec<(i32, i32)> = Vec::new();
            for p1 in vl12 {
                all_pixels.push(p1);
            }
            for p2 in vl13 {
                all_pixels.push(p2)
            }
            for p3 in vl23 {
                all_pixels.push(p3);
            }
            // Sort by row
            all_pixels.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
            let mut scanline_rows: Vec<Vec<(i32, i32)>> = vec![Vec::new(); self.framebuffer.height];
            for p in all_pixels {
                if p.1 > 0 && p.1 < self.framebuffer.height as i32 - 1{
                    scanline_rows[p.1 as usize].push(p);
                }
            }
            // Fill each row from its first to its last collected pixel.
            for row in scanline_rows {
                if row.len() == 0 { continue; }
                let height = row[0].1;
                self.pline(row[0].0, height, row[row.len()-1].0, height, color);
            }
            // Re-trace the edges so the outline is always complete.
            self.pline(v1x, v1y, v2x, v2y, color);
            self.pline(v1x, v1y, v3x, v3y, color);
            self.pline(v2x, v2y, v3x, v3y, color);
        } else {
            self.pline(v1x, v1y, v2x, v2y, color);
            self.pline(v1x, v1y, v3x, v3y, color);
            self.pline(v2x, v2y, v3x, v3y, color);
        }
    }
/// Untested but comes from OneLoneCoders 3D Software Rendering series. Some help would be wonderful, I'm still very confused.
pub fn ptritex(&mut self, image: &Image, x1: i64, y1: i64, u1: f64, v1: f64, w1: f64,
x2: i64, y2: i64, u2: f64, v2: f64, w2: f64,
x3: i64, y3: i64, u3: f64, v3: f64, w3: f64)
{
// We need to put all this stuff into local scope so it doesn't break
let mut x1: i64 = x1;
let mut y1: i64 = y1;
let mut u1: f64 = u1;
let mut v1: f64 = v1;
let mut w1: f64 = w1;
let mut x2: i64 = x2;
let mut y2: i64 = y2;
let mut u2: f64 = u2;
let mut v2: f64 = v2;
let mut w2: f64 = w2;
let mut x3: i64 = x3;
let mut y3: i64 = y3;
let mut u3: f64 = u3;
let mut v3: f64 = v3;
let mut w3: f64 = w3;
if y2 < y1 {
std::mem::swap(&mut y1, &mut y2);
std::mem::swap(&mut x1, &mut x2);
std::mem::swap(&mut u1, &mut u2);
std::mem::swap(&mut v1, &mut v2);
std::mem::swap(&mut w1, &mut w2);
}
if y3 < y1 {
std::mem::swap(&mut y1, &mut y3);
std::mem::swap(&mut x1, &mut x3);
std::mem::swap(&mut u1, &mut u3);
std::mem::swap(&mut v1, &mut v3);
std::mem::swap(&mut w1, &mut w3);
}
if y3 < y2 {
std::mem::swap(&mut y2, &mut y3);
std::mem::swap(&mut x2, &mut x3);
std::mem::swap(&mut u2, &mut u3);
std::mem::swap(&mut v2, &mut v3);
std::mem::swap(&mut w2, &mut w3);
}
let mut dy1: i64 = y2 - y1;
let mut dx1: i64 = x2 - x1;
let mut dv1: f64 = v2 - v1;
let mut du1: f64 = u2 - u1;
let mut dw1: f64 = w2 - w1;
let mut dy2: i64 = y3 - y1;
let mut dx2: i64 = x3 - x1;
let mut du2: f64 = u3 - u1;
let mut dv2: f64 = v3 - v1;
let mut dw2: f64 = u3 - u1;
let mut sdw2: f64 = w3 - w1;
let mut tex_u: f64 = 0.0;
let mut tex_v: f64 = 0.0;
let mut tex_w: f64 = 0.0;
let mut dax_step: f64 = 0.0;
let mut dbx_step: f64 = 0.0;
let mut du1_step: f64 = 0.0;
let mut dv1_step: f64 = 0.0;
let mut du2_step: f64 = 0.0;
let mut dv2_step: f64 = 0.0;
let mut dw1_step: f64 = 0.0;
let mut dw2_step: f64 = 0.0;
if dy1 > 0 { dax_step = dx1 as f64 / dy1.abs() as f64; }
if dy2 > 0 { dbx_step = dx2 as f64 / dy2.abs() as f64; }
if dy1 > 0 { du1_step = du1 as f64 / dy1.abs() as f64; }
if dy1 > 0 { dv1_step = dv1 as f64 / dy1.abs() as f64; }
if dy1 > 0 { dw1_step = dw1 as f64 / dy1.abs() as f64; }
if dy2 > 0 { du2_step = du2 as f64 / dy2.abs() as f64; }
if dy2 > 0 { dv2_step = dv2 as f64 / dy2.abs() as f64; }
if dy2 > 0 { dw2_step = dw2 as f64 / dy2.abs() as f64; }
// Drawing top half of triangle
if dy1 > 0 {
for i in y1..y2 {
let mut ax: i64 = (x1 as f64 + (i - y1) as f64 * dax_step) as i64;
let mut bx: i64 = (x1 as f64 + (i - y1) as f64 * dbx_step) as i64;
let mut tex_su: f64 = u1 as f64 + (i - y1) as f64 * du1_step;
let mut tex_sv: f64 = v1 as f64 + (i - y1) as f64 * dv1_step;
let mut tex_sw: f64 = w1 as f64 + (i - y1) as f64 * dw1_step;
let mut tex_eu: f64 = u1 as f64 + (i - y1) as f64 * du2_step;
let mut tex_ev: f64 = v1 as f64 + (i - y1) as f64 * dv2_step;
let mut tex_ew: f64 = w1 as f64 + (i - y1) as f64 * dw2_step;
if ax > bx {
std::mem::swap(&mut ax, &mut bx);
std::mem::swap(&mut tex_su, &mut tex_eu);
std::mem::swap(&mut tex_sv, &mut tex_ev);
std::mem::swap(&mut tex_sw, &mut tex_ew);
}
tex_u = tex_su;
tex_v = tex_sv;
tex_w = tex_sw;
let mut tstep: f64 = 1.0 / (bx - ax) as f64;
let mut t: f64 = 0.0;
for j in ax..bx {
tex_u = (1.0 - t) * tex_su + t * tex_eu;
tex_v = (1.0 - t) * tex_sv + t * tex_ev;
tex_w = (1.0 - t) * tex_sw + t * tex_ew;
//if tex_w > self.dget(j, i) {
let px: i32 = (tex_u / tex_w) as i32;
let py: i32 = (tex_v / tex_w) as i32;
let color = image.pget(px, py);
self.pset(j as i32, i as i32, color);
//self.dset(j, i, tex_w);
//}
t += tstep;
}
}
}
// Drawing bottom half of triangle
dy1 = y3 - y2;
dx1 = x3 - x2;
dv1 = v3 - v2;
du1 = u3 - u2;
dw1 = w3 - w2;
if dy1 > 0 { dax_step = dx1 as f64 / dy1.abs() as f64; }
if dy2 > 0 { dbx_step = dx2 as f64 / dy2.abs() as f64; }
du1_step = 0.0;
dv1_step = 0.0;
if dy1 > 0 { du1_step = du1 / dy1.abs() as f64; }
if dy1 > 0 { dv1_step = dv1 / dy1.abs() as f64; }
if dy1 > 0 { dw1_step = dw1 / dy1.abs() as f64; }
if dy1 > 0 {
for i in y2..y3 {
let mut ax: i64 = ((x2 as f64 + (i - y2) as f64) * dax_step) as i64;
let mut bx: i64 = ((x1 as f64 + (i - y1) as f64) * dbx_step) as i64;
let mut tex_su: f64 = u2 + ((i - y2) as f64) * du1_step;
let mut tex_sv: f64 = v2 + ((i - y2) as f64) * dv1_step;
let mut tex_sw: f64 = w2 + ((i - y2) as f64) * dw1_step;
let mut tex_eu: f64 = u1 + ((i - y1) as f64) * du2_step;
let mut tex_ev: f64 = v1 + ((i - y1) as f64) * dv2_step;
let mut tex_ew: f64 = w1 + ((i - y1) as f64) * dw2_step;
if ax > bx {
std::mem::swap(&mut ax, &mut bx);
std::mem::swap(&mut tex_su, &mut tex_eu);
std::mem::swap(&mut tex_sv, &mut tex_ev);
std::mem::swap(&mut tex_sw, &mut tex_ew);
}
tex_u = tex_su;
tex_v = tex_sv;
tex_w = tex_sw;
let mut tstep: f64 = 1.0 / (bx - ax) as f64;
let mut t: f64 = 0.0;
for j in ax..bx {
tex_u = (1.0 - t) * tex_su + t * tex_eu;
tex_v = (1.0 - t) * tex_sv + t * tex_ev;
tex_w = (1.0 - t) * tex_sw + t * tex_ew;
//if tex_w > self.dget(j, i) {
let px: i32 = (tex_u / tex_w) as i32;
let py: i32 = (tex_v / tex_w) as i32;
let color = image.pget(px, py);
self.pset(j as i32, i as i32, color);
//self.dset(j, i, tex_w);
//}
t += tstep;
}
}
}
}
/// Draws a quadratic beizer curve onto the screen.
pub fn pbeizer(&mut self, thickness: i32, x0: i32, y0: i32, x1: i32, y1: i32, mx: i32, my: i32, color: Color) {
let mut step: f32 = 0.0;
// Get the maximal number of pixels we will need to use and get its inverse as a step size.
// Otherwise we don't know how many pixels we will need to draw
let stride_c1 = self.cline(x0, y0, mx, my).len() as f32;
let stride_c2 = self.cline(mx, my, x1, y1).len() as f32;
let stride: f32 = (1.0 / (stride_c1 + stride_c2)) * 0.5;
let x0 = x0 as f32;
let y0 = x0 as f32;
let x1 = x1 as f32;
let y1 = y1 as f32;
let mx = mx as f32;
let my = my as f32;
loop {
if step > 1.0 { break; }
let px0 = lerpf(x0, mx, step);
let py0 = lerpf(y0, my, step);
let px1 = lerpf(px0, x1, step);
let py1 = lerpf(py0, y1, step);
self.pcircle(true, px1 as i32, py1 as i32, thickness, color);
step += stride;
}
}
/// Draws a cubic beizer curve onto the screen.
pub fn pbeizer_cubic(&mut self, x0: i32, y0: i32, x1: i32, y1: i32, mx0: i32, my0: i32, mx1: i32, my1: i32, color: Color) {
let mut step: f32 = 0.0;
// Get the maximal number of pixels we will need to use and get its inverse as a step size.
// Otherwise we don't know how many pixels we will need to draw
let stride_c1: f32 = self.cline(x0, y0, mx0, my0).len() as f32;
let stride_c2: f32 = self.cline(mx0, my0, mx1, my1).len() as f32;
let stride_c3: f32 = self.cline(mx1, my1, x1, y1).len() as f32;
let stride = (1.0 / (stride_c1 + stride_c2 + stride_c3)) * 0.5;
let x0 = x0 as f32;
let y0 = x0 as f32;
let x1 = x1 as f32;
let y1 = y1 as f32;
let mx0 = mx0 as f32;
let my0 = my0 as f32;
let mx1 = mx1 as f32;
let my1 = my1 as f32;
loop {
if step > 1.0 { break; }
let px0 = lerpf(x0, mx0, step);
let py0 = lerpf(y0, my0, step);
let px1 = lerpf(px0, mx1, step);
let py1 = lerpf(py0, my1, step);
let px2 = lerpf(px1, x1, step);
let py2 = lerpf(py1, y1, step);
self.pset(px2 as i32, py2 as i32, color);
step += stride;
}
}
// Collecting versions of drawing functions
/// Returns pixel positions across the line.
pub fn cline(&mut self, x0: i32, y0: i32, x1: i32, y1: i32) -> Vec<(i32, i32)> {
let mut pixels: Vec<(i32, i32)> = Vec::new();
// Create local variables for moving start point
let mut x0 = x0;
let mut y0 = y0;
// Get absolute x/y offset
let dx = if x0 > x1 { x0 - x1 } else { x1 - x0 };
let dy = if y0 > y1 { y0 - y1 } else { y1 - y0 };
// Get slopes
let sx = if x0 < x1 { 1 } else { -1 };
let sy = if y0 < y1 { 1 } else { -1 };
// Initialize error
let mut err = if dx > dy { dx } else {-dy} / 2;
let mut err2;
loop {
// Set pixel
pixels.push((x0, y0));
// Check end condition
if x0 == x1 && y0 == y1 { break };
// Store old error
err2 = 2 * err;
// Adjust error and start position
if err2 > -dx { err -= dy; x0 += sx; }
if err2 < dy { err += dx; y0 += sy; }
}
return pixels;
}
} | 38.02079 | 159 | 0.511374 |
cc536f7de93dd1240590a634131afe4296ecba97 | 1,031 | //! Session IDs: the YubiHSM2 supports up to 16 concurrent sessions.
use super::{Error, ErrorKind::ProtocolError};
use std::fmt::{self, Display};
/// Maximum session identifier
///
/// Values are validated against this bound in [`Id::from_u8`]; anything
/// larger is rejected with a protocol error.
pub const MAX_SESSION_ID: Id = Id(16);
/// Session/Channel IDs
///
/// Newtype wrapper around the raw `u8` identifier.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct Id(u8);
impl Id {
/// Create a new session ID from a byte value
pub fn from_u8(id: u8) -> Result<Self, Error> {
if id > MAX_SESSION_ID.0 {
fail!(
ProtocolError,
"session ID exceeds the maximum allowed: {} (max {})",
id,
MAX_SESSION_ID.0
);
}
Ok(Id(id))
}
/// Obtain the next session ID
pub fn succ(self) -> Result<Self, Error> {
Self::from_u8(self.0 + 1)
}
/// Obtain session ID as a u8
pub fn to_u8(self) -> u8 {
self.0
}
}
impl Display for Id {
    /// Renders the session ID as its decimal byte value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}
| 23.431818 | 70 | 0.553831 |
8aeb5f60e766713a2f06e2c72ae3e08fbc67a009 | 161 | use auto_impl::auto_impl;
// `#[auto_impl(Fn)]` derives `impl<F: Fn()> Foo for F`, letting any
// zero-argument closure stand in for a `Foo`.
#[auto_impl(Fn)]
trait Foo {
    /// Performs the action represented by this value.
    fn execute(&self);
}
/// Accepts any `Foo` implementation; the body is intentionally empty.
fn foo(_: impl Foo) {}
fn bar() {
    // Fn: a plain closure satisfies `Foo` via the auto_impl-generated impl.
    let action = || {};
    foo(action);
}
// Compile-test entry point; nothing to execute.
fn main() {}
| 8.944444 | 25 | 0.509317 |
ac5ae69c4b1de736a48fa1844cb03a5ce43e1946 | 69,064 | // Copyright © 2020, Oracle and/or its affiliates.
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//
use crate::config::CpusConfig;
use crate::device_manager::DeviceManager;
use crate::memory_manager::MemoryManager;
use crate::seccomp_filters::{get_seccomp_filter, Thread};
#[cfg(target_arch = "x86_64")]
use crate::vm::physical_bits;
use crate::GuestMemoryMmap;
use crate::CPU_MANAGER_SNAPSHOT_ID;
#[cfg(feature = "acpi")]
use acpi_tables::{aml, aml::Aml, sdt::Sdt};
use anyhow::anyhow;
use arch::EntryPoint;
#[cfg(any(target_arch = "aarch64", feature = "acpi"))]
use arch::NumaNodes;
use devices::interrupt_controller::InterruptController;
#[cfg(target_arch = "aarch64")]
use hypervisor::kvm::kvm_bindings;
#[cfg(target_arch = "x86_64")]
use hypervisor::CpuId;
use hypervisor::{vm::VmmOps, CpuState, HypervisorCpuError, VmExit};
use libc::{c_void, siginfo_t};
use seccompiler::{apply_filter, SeccompAction};
use std::collections::BTreeMap;
use std::os::unix::thread::JoinHandleExt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Barrier, Mutex};
use std::{cmp, io, result, thread};
use vm_device::BusDevice;
#[cfg(feature = "acpi")]
use vm_memory::GuestAddress;
use vm_memory::GuestMemoryAtomic;
use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable,
Transportable,
};
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::signal::{register_signal_handler, SIGRTMIN};
#[cfg(feature = "acpi")]
// Size (in bytes) of the MMIO region backing the ACPI CPU-manager device;
// used both when allocating the region and when inserting it on the bus.
pub const CPU_MANAGER_ACPI_SIZE: usize = 0xc;
#[derive(Debug)]
pub enum Error {
    /// Cannot create the vCPU.
    VcpuCreate(anyhow::Error),
    /// Cannot run the VCPUs.
    VcpuRun(anyhow::Error),
    /// Cannot spawn a new vCPU thread.
    VcpuSpawn(io::Error),
    /// Cannot generate common CPUID
    CommonCpuId(arch::Error),
    /// Error configuring VCPU
    VcpuConfiguration(arch::Error),
    #[cfg(target_arch = "aarch64")]
    /// Error fetching preferred target
    VcpuArmPreferredTarget(hypervisor::HypervisorVmError),
    #[cfg(target_arch = "aarch64")]
    /// Error doing vCPU init on Arm.
    VcpuArmInit(hypervisor::HypervisorCpuError),
    /// Failed to join on vCPU threads
    ThreadCleanup(std::boxed::Box<dyn std::any::Any + std::marker::Send>),
    /// Cannot add legacy device to Bus.
    BusError(vm_device::BusError),
    /// Asking for more vCPUs than we can have
    DesiredVCpuCountExceedsMax,
    /// Cannot create seccomp filter
    CreateSeccompFilter(seccompiler::Error),
    /// Cannot apply seccomp filter
    ApplySeccompFilter(seccompiler::Error),
    /// Error starting vCPU after restore
    StartRestoreVcpu(anyhow::Error),
    /// Error because an unexpected VmExit type was received.
    UnexpectedVmExit,
    /// Failed to allocate MMIO address
    AllocateMmmioAddress,
    #[cfg(feature = "tdx")]
    /// Cannot initialize TDX on the vCPU.
    InitializeTdx(hypervisor::HypervisorCpuError),
    #[cfg(target_arch = "aarch64")]
    /// Cannot initialize the PMU on the vCPU.
    InitPmu(hypervisor::HypervisorCpuError),
    /// Failed scheduling the thread on the expected CPU set.
    ScheduleCpuSet,
}
/// Convenience alias for fallible CPU-manager operations.
pub type Result<T> = result::Result<T, Error>;
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// Processor Local APIC entry as laid out in the ACPI MADT (packed wire
// format, hence no derives that would read unaligned fields).
struct LocalApic {
    pub r#type: u8,
    pub length: u8,
    pub processor_id: u8,
    pub apic_id: u8,
    pub flags: u32,
}
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
// I/O APIC entry as laid out in the ACPI MADT (packed wire format).
struct Ioapic {
    pub r#type: u8,
    pub length: u8,
    pub ioapic_id: u8,
    _reserved: u8,
    pub apic_address: u32,
    pub gsi_base: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// GIC CPU interface (GICC) entry as laid out in the ACPI MADT.
struct GicC {
    pub r#type: u8,
    pub length: u8,
    pub reserved0: u16,
    pub cpu_interface_number: u32,
    pub uid: u32,
    pub flags: u32,
    pub parking_version: u32,
    pub performance_interrupt: u32,
    pub parked_address: u64,
    pub base_address: u64,
    pub gicv_base_address: u64,
    pub gich_base_address: u64,
    pub vgic_interrupt: u32,
    pub gicr_base_address: u64,
    pub mpidr: u64,
    pub proc_power_effi_class: u8,
    pub reserved1: u8,
    pub spe_overflow_interrupt: u16,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// GIC distributor (GICD) entry as laid out in the ACPI MADT.
struct GicD {
    pub r#type: u8,
    pub length: u8,
    pub reserved0: u16,
    pub gic_id: u32,
    pub base_address: u64,
    pub global_irq_base: u32,
    pub version: u8,
    pub reserved1: [u8; 3],
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// GIC redistributor (GICR) entry as laid out in the ACPI MADT.
struct GicR {
    pub r#type: u8,
    pub length: u8,
    pub reserved: u16,
    pub base_address: u64,
    pub range_length: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// GIC interrupt translation service (ITS) entry as laid out in the ACPI MADT.
struct GicIts {
    pub r#type: u8,
    pub length: u8,
    pub reserved0: u16,
    pub translation_id: u32,
    pub base_address: u64,
    pub reserved1: u32,
}
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
#[allow(dead_code)]
#[repr(packed)]
// Processor hierarchy node as laid out in the ACPI PPTT (packed wire format).
struct ProcessorHierarchyNode {
    pub r#type: u8,
    pub length: u8,
    pub reserved: u16,
    pub flags: u32,
    pub parent: u32,
    pub acpi_processor_id: u32,
    pub num_private_resources: u32,
}
#[allow(dead_code)]
#[repr(packed)]
#[derive(Default)]
// Interrupt source override entry as laid out in the ACPI MADT.
struct InterruptSourceOverride {
    pub r#type: u8,
    pub length: u8,
    pub bus: u8,
    pub source: u8,
    pub gsi: u32,
    pub flags: u16,
}
/// A wrapper around creating and using a kvm-based VCPU.
pub struct Vcpu {
    // The hypervisor abstracted CPU.
    vcpu: Arc<dyn hypervisor::Vcpu>,
    // Zero-based index of this vCPU.
    id: u8,
    #[cfg(target_arch = "aarch64")]
    // MPIDR register value captured when the vCPU is configured.
    mpidr: u64,
    // Register state captured by `Pausable::pause`, restored on `resume`.
    saved_state: Option<CpuState>,
}
impl Vcpu {
    /// Constructs a new VCPU for `vm`.
    ///
    /// # Arguments
    ///
    /// * `id` - Represents the CPU number between [0, max vcpus).
    /// * `vm` - The virtual machine this vcpu will get attached to.
    /// * `vmmops` - Optional object for exit handling.
    pub fn new(
        id: u8,
        vm: &Arc<dyn hypervisor::Vm>,
        vmmops: Option<Arc<dyn VmmOps>>,
    ) -> Result<Self> {
        let vcpu = vm
            .create_vcpu(id, vmmops)
            .map_err(|e| Error::VcpuCreate(e.into()))?;
        // Initially the cpuid per vCPU is the one supported by this VM.
        Ok(Vcpu {
            vcpu,
            id,
            #[cfg(target_arch = "aarch64")]
            mpidr: 0,
            saved_state: None,
        })
    }
    /// Configures a vcpu and should be called once per vcpu when created.
    ///
    /// # Arguments
    ///
    /// * `kernel_entry_point` - Kernel entry point address in guest memory and boot protocol used.
    /// * `vm_memory` - Guest memory.
    /// * `cpuid` - (x86_64) CpuId, wrapper over the `kvm_cpuid2` structure.
    pub fn configure(
        &mut self,
        #[cfg(target_arch = "aarch64")] vm: &Arc<dyn hypervisor::Vm>,
        kernel_entry_point: Option<EntryPoint>,
        #[cfg(target_arch = "x86_64")] vm_memory: &GuestMemoryAtomic<GuestMemoryMmap>,
        #[cfg(target_arch = "x86_64")] cpuid: CpuId,
        #[cfg(target_arch = "x86_64")] kvm_hyperv: bool,
    ) -> Result<()> {
        #[cfg(target_arch = "aarch64")]
        {
            // On aarch64 the vCPU must be initialized (KVM_ARM_VCPU_INIT)
            // before it can be configured; the MPIDR is captured here.
            self.init(vm)?;
            self.mpidr = arch::configure_vcpu(&self.vcpu, self.id, kernel_entry_point)
                .map_err(Error::VcpuConfiguration)?;
        }
        info!("Configuring vCPU: cpu_id = {}", self.id);
        #[cfg(target_arch = "x86_64")]
        arch::configure_vcpu(
            &self.vcpu,
            self.id,
            kernel_entry_point,
            vm_memory,
            cpuid,
            kvm_hyperv,
        )
        .map_err(Error::VcpuConfiguration)?;
        Ok(())
    }
    /// Gets the MPIDR register value.
    #[cfg(target_arch = "aarch64")]
    pub fn get_mpidr(&self) -> u64 {
        self.mpidr
    }
    /// Gets the saved vCPU state.
    #[cfg(target_arch = "aarch64")]
    pub fn get_saved_state(&self) -> Option<CpuState> {
        self.saved_state.clone()
    }
    /// Initializes an aarch64 specific vcpu for booting Linux.
    #[cfg(target_arch = "aarch64")]
    pub fn init(&self, vm: &Arc<dyn hypervisor::Vm>) -> Result<()> {
        let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default();
        // This reads back the kernel's preferred target type.
        vm.get_preferred_target(&mut kvi)
            .map_err(Error::VcpuArmPreferredTarget)?;
        // We already checked that the capability is supported.
        kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PSCI_0_2;
        kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_PMU_V3;
        // Non-boot cpus are powered off initially.
        if self.id > 0 {
            kvi.features[0] |= 1 << kvm_bindings::KVM_ARM_VCPU_POWER_OFF;
        }
        self.vcpu.vcpu_init(&kvi).map_err(Error::VcpuArmInit)
    }
    /// Runs the VCPU until it exits, returning the reason.
    ///
    /// Note that the state of the VCPU and associated VM must be setup first for this to do
    /// anything useful.
    pub fn run(&self) -> std::result::Result<VmExit, HypervisorCpuError> {
        self.vcpu.run()
    }
}
// Identifier of the vCPU state data section within a snapshot.
const VCPU_SNAPSHOT_ID: &str = "vcpu";
impl Pausable for Vcpu {
    /// Captures the current register state so it can be restored on resume.
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        let state = self
            .vcpu
            .state()
            .map_err(|e| MigratableError::Pause(anyhow!("Could not get vCPU state {:?}", e)))?;
        self.saved_state = Some(state);
        Ok(())
    }
    /// Pushes the previously captured state back into the vCPU, if any.
    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        match self.saved_state.as_ref() {
            Some(vcpu_state) => self.vcpu.set_state(vcpu_state).map_err(|e| {
                MigratableError::Pause(anyhow!("Could not set the vCPU state {:?}", e))
            }),
            None => Ok(()),
        }
    }
}
impl Snapshottable for Vcpu {
    /// Section identifier shared by all vCPU snapshots.
    fn id(&self) -> String {
        String::from(VCPU_SNAPSHOT_ID)
    }
    /// Serializes the saved register state into a snapshot named after the
    /// vCPU index.
    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        let section =
            SnapshotDataSection::new_from_state(VCPU_SNAPSHOT_ID, &self.saved_state)?;
        let mut vcpu_snapshot = Snapshot::new(&self.id.to_string());
        vcpu_snapshot.add_data_section(section);
        Ok(vcpu_snapshot)
    }
    /// Loads the register state back out of `snapshot`; it is applied to the
    /// vCPU on the next `resume`.
    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        let state = snapshot.to_state(VCPU_SNAPSHOT_ID)?;
        self.saved_state = Some(state);
        Ok(())
    }
}
pub struct CpuManager {
    // Boot/max vCPU counts, topology, affinity and related settings.
    config: CpusConfig,
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    interrupt_controller: Option<Arc<Mutex<dyn InterruptController>>>,
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    vm_memory: GuestMemoryAtomic<GuestMemoryMmap>,
    #[cfg(target_arch = "x86_64")]
    // CPUID template shared by all vCPUs, generated once in `new`.
    cpuid: CpuId,
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    vm: Arc<dyn hypervisor::Vm>,
    // Set once at shutdown; every vCPU thread checks it in its run loop.
    vcpus_kill_signalled: Arc<AtomicBool>,
    // While true, vCPU threads park themselves instead of running.
    vcpus_pause_signalled: Arc<AtomicBool>,
    exit_evt: EventFd,
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    reset_evt: EventFd,
    // Per-slot bookkeeping (thread handle, hotplug flags), indexed by vCPU id.
    vcpu_states: Vec<VcpuState>,
    // vCPU currently addressed through the ACPI selection register.
    selected_cpu: u8,
    vcpus: Vec<Arc<Mutex<Vcpu>>>,
    seccomp_action: SeccompAction,
    vmmops: Arc<dyn VmmOps>,
    #[cfg(feature = "acpi")]
    #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
    // Base of the MMIO region registered for this device on the bus.
    acpi_address: GuestAddress,
    #[cfg(feature = "acpi")]
    // NUMA proximity domain for each vCPU id.
    proximity_domain_per_cpu: BTreeMap<u8, u32>,
    // Host CPU set each vCPU thread is pinned to, keyed by vCPU id.
    affinity: BTreeMap<u8, Vec<u8>>,
}
// Bit positions within the CPU status byte, and register offsets, used by the
// ACPI CPU-hotplug handshake implemented in `impl BusDevice for CpuManager`.
const CPU_ENABLE_FLAG: usize = 0;
const CPU_INSERTING_FLAG: usize = 1;
const CPU_REMOVING_FLAG: usize = 2;
const CPU_EJECT_FLAG: usize = 3;
const CPU_STATUS_OFFSET: u64 = 4;
const CPU_SELECTION_OFFSET: u64 = 0;
impl BusDevice for CpuManager {
    // Guest reads of the hotplug registers: offset 0 returns the currently
    // selected vCPU id, offset 4 returns that vCPU's status bit-field.
    fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) {
        // The Linux kernel, quite reasonably, doesn't zero the memory it gives us.
        data.fill(0);
        match offset {
            CPU_SELECTION_OFFSET => {
                data[0] = self.selected_cpu;
            }
            CPU_STATUS_OFFSET => {
                if self.selected_cpu < self.max_vcpus() {
                    let state = &self.vcpu_states[usize::from(self.selected_cpu)];
                    if state.active() {
                        data[0] |= 1 << CPU_ENABLE_FLAG;
                    }
                    if state.inserting {
                        data[0] |= 1 << CPU_INSERTING_FLAG;
                    }
                    if state.removing {
                        data[0] |= 1 << CPU_REMOVING_FLAG;
                    }
                } else {
                    warn!("Out of range vCPU id: {}", self.selected_cpu);
                }
            }
            _ => {
                warn!(
                    "Unexpected offset for accessing CPU manager device: {:#}",
                    offset
                );
            }
        }
    }
    // Guest writes: offset 0 selects a vCPU, offset 4 acknowledges
    // insertion/removal or requests ejection of the selected vCPU.
    fn write(&mut self, _base: u64, offset: u64, data: &[u8]) -> Option<Arc<Barrier>> {
        match offset {
            CPU_SELECTION_OFFSET => {
                self.selected_cpu = data[0];
            }
            CPU_STATUS_OFFSET => {
                if self.selected_cpu < self.max_vcpus() {
                    let state = &mut self.vcpu_states[usize::from(self.selected_cpu)];
                    // The ACPI code writes back a 1 to acknowledge the insertion
                    if (data[0] & (1 << CPU_INSERTING_FLAG) == 1 << CPU_INSERTING_FLAG)
                        && state.inserting
                    {
                        state.inserting = false;
                    }
                    // Ditto for removal
                    if (data[0] & (1 << CPU_REMOVING_FLAG) == 1 << CPU_REMOVING_FLAG)
                        && state.removing
                    {
                        state.removing = false;
                    }
                    // Trigger removal of vCPU
                    if data[0] & (1 << CPU_EJECT_FLAG) == 1 << CPU_EJECT_FLAG {
                        if let Err(e) = self.remove_vcpu(self.selected_cpu) {
                            error!("Error removing vCPU: {:?}", e);
                        }
                    }
                } else {
                    warn!("Out of range vCPU id: {}", self.selected_cpu);
                }
            }
            _ => {
                warn!(
                    "Unexpected offset for accessing CPU manager device: {:#}",
                    offset
                );
            }
        }
        None
    }
}
#[derive(Default)]
// Per-vCPU-slot bookkeeping: hotplug handshake flags plus the backing thread
// handle and the atomics shared with that thread.
struct VcpuState {
    // Set when the vCPU was hotplugged; cleared when ACPI acknowledges it.
    inserting: bool,
    // Set when the vCPU is marked for removal; cleared on ACPI acknowledge.
    removing: bool,
    // Thread backing this vCPU; `None` means the slot is inactive.
    handle: Option<thread::JoinHandle<()>>,
    // Per-vCPU kill request, checked by the vCPU run loop.
    kill: Arc<AtomicBool>,
    // Set by the vCPU thread when it has left KVM_RUN (parked or exiting).
    vcpu_run_interrupted: Arc<AtomicBool>,
}
impl VcpuState {
    // A vCPU is "present"/active when a thread is currently backing it.
    fn active(&self) -> bool {
        self.handle.is_some()
    }
    // Repeatedly signals the vCPU thread until it confirms (via
    // `vcpu_run_interrupted`) that it has left the hypervisor run loop.
    fn signal_thread(&self) {
        if let Some(handle) = self.handle.as_ref() {
            loop {
                // SAFETY assumption: the pthread_t comes from a live
                // JoinHandle we still own — TODO confirm no race with a
                // thread that exits between iterations.
                unsafe {
                    libc::pthread_kill(handle.as_pthread_t() as _, SIGRTMIN());
                }
                if self.vcpu_run_interrupted.load(Ordering::SeqCst) {
                    break;
                } else {
                    // This is more effective than thread::yield_now() at
                    // avoiding a priority inversion with the vCPU thread
                    thread::sleep(std::time::Duration::from_millis(1));
                }
            }
        }
    }
    // Joins and drops the backing thread, propagating any panic payload.
    fn join_thread(&mut self) -> Result<()> {
        if let Some(handle) = self.handle.take() {
            handle.join().map_err(Error::ThreadCleanup)?
        }
        Ok(())
    }
    // Wakes the thread if it parked itself while paused.
    fn unpark_thread(&self) {
        if let Some(handle) = self.handle.as_ref() {
            handle.thread().unpark()
        }
    }
}
impl CpuManager {
    #[allow(unused_variables)]
    #[allow(clippy::too_many_arguments)]
    /// Builds the `CpuManager`: pre-sizes the per-vCPU state table, generates
    /// the shared CPUID template (x86_64), allocates and registers the ACPI
    /// hotplug MMIO region, and records NUMA proximity and CPU affinity maps.
    pub fn new(
        config: &CpusConfig,
        device_manager: &Arc<Mutex<DeviceManager>>,
        memory_manager: &Arc<Mutex<MemoryManager>>,
        vm: Arc<dyn hypervisor::Vm>,
        exit_evt: EventFd,
        reset_evt: EventFd,
        hypervisor: Arc<dyn hypervisor::Hypervisor>,
        seccomp_action: SeccompAction,
        vmmops: Arc<dyn VmmOps>,
        #[cfg(feature = "tdx")] tdx_enabled: bool,
        #[cfg(any(target_arch = "aarch64", feature = "acpi"))] numa_nodes: &NumaNodes,
    ) -> Result<Arc<Mutex<CpuManager>>> {
        let guest_memory = memory_manager.lock().unwrap().guest_memory();
        // One state slot per possible vCPU, so hotplug never reallocates.
        let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus));
        vcpu_states.resize_with(usize::from(config.max_vcpus), VcpuState::default);
        #[cfg(target_arch = "x86_64")]
        let sgx_epc_sections = memory_manager
            .lock()
            .unwrap()
            .sgx_epc_region()
            .as_ref()
            .map(|sgx_epc_region| sgx_epc_region.epc_sections().values().cloned().collect());
        #[cfg(target_arch = "x86_64")]
        let cpuid = {
            let phys_bits = physical_bits(config.max_phys_bits);
            arch::generate_common_cpuid(
                hypervisor,
                config
                    .topology
                    .clone()
                    .map(|t| (t.threads_per_core, t.cores_per_die, t.dies_per_package)),
                sgx_epc_sections,
                phys_bits,
                config.kvm_hyperv,
                #[cfg(feature = "tdx")]
                tdx_enabled,
            )
            .map_err(Error::CommonCpuId)?
        };
        let device_manager = device_manager.lock().unwrap();
        #[cfg(feature = "acpi")]
        let acpi_address = device_manager
            .allocator()
            .lock()
            .unwrap()
            .allocate_platform_mmio_addresses(None, CPU_MANAGER_ACPI_SIZE as u64, None)
            .ok_or(Error::AllocateMmmioAddress)?;
        // Flatten the NUMA config into a vCPU id -> proximity domain map.
        #[cfg(feature = "acpi")]
        let proximity_domain_per_cpu: BTreeMap<u8, u32> = {
            let mut cpu_list = Vec::new();
            for (proximity_domain, numa_node) in numa_nodes.iter() {
                for cpu in numa_node.cpus.iter() {
                    cpu_list.push((*cpu, *proximity_domain))
                }
            }
            cpu_list
        }
        .into_iter()
        .collect();
        let affinity = if let Some(cpu_affinity) = config.affinity.as_ref() {
            cpu_affinity
                .iter()
                .map(|a| (a.vcpu, a.host_cpus.clone()))
                .collect()
        } else {
            BTreeMap::new()
        };
        let cpu_manager = Arc::new(Mutex::new(CpuManager {
            config: config.clone(),
            interrupt_controller: device_manager.interrupt_controller().clone(),
            vm_memory: guest_memory,
            #[cfg(target_arch = "x86_64")]
            cpuid,
            vm,
            vcpus_kill_signalled: Arc::new(AtomicBool::new(false)),
            vcpus_pause_signalled: Arc::new(AtomicBool::new(false)),
            vcpu_states,
            exit_evt,
            reset_evt,
            selected_cpu: 0,
            vcpus: Vec::with_capacity(usize::from(config.max_vcpus)),
            seccomp_action,
            vmmops,
            #[cfg(feature = "acpi")]
            acpi_address,
            #[cfg(feature = "acpi")]
            proximity_domain_per_cpu,
            affinity,
        }));
        // Expose the hotplug registers to the guest on the MMIO bus.
        #[cfg(feature = "acpi")]
        device_manager
            .mmio_bus()
            .insert(
                cpu_manager.clone(),
                acpi_address.0,
                CPU_MANAGER_ACPI_SIZE as u64,
            )
            .map_err(Error::BusError)?;
        Ok(cpu_manager)
    }
    /// Creates (and, for a restore, re-initializes) a single vCPU and appends
    /// it to `self.vcpus`. When `snapshot` is provided the saved state is
    /// restored instead of configuring from `entry_point`.
    fn create_vcpu(
        &mut self,
        cpu_id: u8,
        entry_point: Option<EntryPoint>,
        snapshot: Option<Snapshot>,
    ) -> Result<Arc<Mutex<Vcpu>>> {
        info!("Creating vCPU: cpu_id = {}", cpu_id);
        let mut vcpu = Vcpu::new(cpu_id, &self.vm, Some(self.vmmops.clone()))?;
        if let Some(snapshot) = snapshot {
            // AArch64 vCPUs should be initialized after created.
            #[cfg(target_arch = "aarch64")]
            vcpu.init(&self.vm)?;
            vcpu.restore(snapshot).expect("Failed to restore vCPU");
        } else {
            #[cfg(target_arch = "x86_64")]
            vcpu.configure(
                entry_point,
                &self.vm_memory,
                self.cpuid.clone(),
                self.config.kvm_hyperv,
            )
            .expect("Failed to configure vCPU");
            #[cfg(target_arch = "aarch64")]
            vcpu.configure(&self.vm, entry_point)
                .expect("Failed to configure vCPU");
        }
        // Adding vCPU to the CpuManager's vCPU list.
        let vcpu = Arc::new(Mutex::new(vcpu));
        self.vcpus.push(Arc::clone(&vcpu));
        Ok(vcpu)
    }
/// Only create new vCPUs if there aren't any inactive ones to reuse
fn create_vcpus(&mut self, desired_vcpus: u8, entry_point: Option<EntryPoint>) -> Result<()> {
info!(
"Request to create new vCPUs: desired = {}, max = {}, allocated = {}, present = {}",
desired_vcpus,
self.config.max_vcpus,
self.vcpus.len(),
self.present_vcpus()
);
if desired_vcpus > self.config.max_vcpus {
return Err(Error::DesiredVCpuCountExceedsMax);
}
// Only create vCPUs in excess of all the allocated vCPUs.
for cpu_id in self.vcpus.len() as u8..desired_vcpus {
self.create_vcpu(cpu_id, entry_point, None)?;
}
Ok(())
}
#[cfg(target_arch = "aarch64")]
pub fn init_pmu(&self, irq: u32) -> Result<bool> {
let cpu_attr = kvm_bindings::kvm_device_attr {
group: kvm_bindings::KVM_ARM_VCPU_PMU_V3_CTRL,
attr: u64::from(kvm_bindings::KVM_ARM_VCPU_PMU_V3_INIT),
addr: 0x0,
flags: 0,
};
for cpu in self.vcpus.iter() {
let tmp = irq;
let cpu_attr_irq = kvm_bindings::kvm_device_attr {
group: kvm_bindings::KVM_ARM_VCPU_PMU_V3_CTRL,
attr: u64::from(kvm_bindings::KVM_ARM_VCPU_PMU_V3_IRQ),
addr: &tmp as *const u32 as u64,
flags: 0,
};
// Check if PMU attr is available, if not, log the information.
if cpu.lock().unwrap().vcpu.has_vcpu_attr(&cpu_attr).is_ok() {
// Set irq for PMU
cpu.lock()
.unwrap()
.vcpu
.set_vcpu_attr(&cpu_attr_irq)
.map_err(Error::InitPmu)?;
// Init PMU
cpu.lock()
.unwrap()
.vcpu
.set_vcpu_attr(&cpu_attr)
.map_err(Error::InitPmu)?;
} else {
debug!(
"PMU attribute is not supported in vCPU{}, skip PMU init!",
cpu.lock().unwrap().id
);
return Ok(false);
}
}
Ok(true)
}
    /// Spawns the OS thread that backs `vcpu`: pins it to its configured host
    /// CPU set, applies the seccomp filter, then loops on the hypervisor run
    /// call, honoring the shared pause/kill flags and forwarding
    /// reset/shutdown exits to the corresponding event fds.
    fn start_vcpu(
        &mut self,
        vcpu: Arc<Mutex<Vcpu>>,
        vcpu_id: u8,
        vcpu_thread_barrier: Arc<Barrier>,
        inserting: bool,
    ) -> Result<()> {
        let reset_evt = self.reset_evt.try_clone().unwrap();
        let exit_evt = self.exit_evt.try_clone().unwrap();
        let panic_exit_evt = self.exit_evt.try_clone().unwrap();
        let vcpu_kill_signalled = self.vcpus_kill_signalled.clone();
        let vcpu_pause_signalled = self.vcpus_pause_signalled.clone();
        let vcpu_kill = self.vcpu_states[usize::from(vcpu_id)].kill.clone();
        let vcpu_run_interrupted = self.vcpu_states[usize::from(vcpu_id)]
            .vcpu_run_interrupted
            .clone();
        let panic_vcpu_run_interrupted = vcpu_run_interrupted.clone();
        // Prepare the CPU set the current vCPU is expected to run onto.
        let cpuset = self.affinity.get(&vcpu_id).map(|host_cpus| {
            let mut cpuset: libc::cpu_set_t = unsafe { std::mem::zeroed() };
            unsafe { libc::CPU_ZERO(&mut cpuset) };
            for host_cpu in host_cpus {
                unsafe { libc::CPU_SET(*host_cpu as usize, &mut cpuset) };
            }
            cpuset
        });
        // Retrieve seccomp filter for vcpu thread
        let vcpu_seccomp_filter = get_seccomp_filter(&self.seccomp_action, Thread::Vcpu)
            .map_err(Error::CreateSeccompFilter)?;
        #[cfg(target_arch = "x86_64")]
        let interrupt_controller_clone = self.interrupt_controller.as_ref().cloned();
        info!("Starting vCPU: cpu_id = {}", vcpu_id);
        let handle = Some(
            thread::Builder::new()
                .name(format!("vcpu{}", vcpu_id))
                .spawn(move || {
                    // Schedule the thread to run on the expected CPU set
                    if let Some(cpuset) = cpuset.as_ref() {
                        let ret = unsafe {
                            libc::sched_setaffinity(
                                0,
                                std::mem::size_of::<libc::cpu_set_t>(),
                                cpuset as *const libc::cpu_set_t,
                            )
                        };
                        if ret != 0 {
                            error!(
                                "Failed scheduling the vCPU {} on the expected CPU set: {}",
                                vcpu_id,
                                io::Error::last_os_error()
                            );
                            return;
                        }
                    }
                    // Apply seccomp filter for vcpu thread.
                    if !vcpu_seccomp_filter.is_empty() {
                        if let Err(e) =
                            apply_filter(&vcpu_seccomp_filter).map_err(Error::ApplySeccompFilter)
                        {
                            error!("Error applying seccomp filter: {:?}", e);
                            return;
                        }
                    }
                    extern "C" fn handle_signal(_: i32, _: *mut siginfo_t, _: *mut c_void) {}
                    // This uses an async signal safe handler to kill the vcpu handles.
                    register_signal_handler(SIGRTMIN(), handle_signal)
                        .expect("Failed to register vcpu signal handler");
                    // Block until all CPUs are ready.
                    vcpu_thread_barrier.wait();
                    std::panic::catch_unwind(move || {
                        loop {
                            // If we are being told to pause, we park the thread
                            // until the pause boolean is toggled.
                            // The resume operation is responsible for toggling
                            // the boolean and unpark the thread.
                            // We enter a loop because park() could spuriously
                            // return. We will then park() again unless the
                            // pause boolean has been toggled.
                            // Need to use Ordering::SeqCst as we have multiple
                            // loads and stores to different atomics and we need
                            // to see them in a consistent order in all threads
                            if vcpu_pause_signalled.load(Ordering::SeqCst) {
                                vcpu_run_interrupted.store(true, Ordering::SeqCst);
                                while vcpu_pause_signalled.load(Ordering::SeqCst) {
                                    thread::park();
                                }
                                vcpu_run_interrupted.store(false, Ordering::SeqCst);
                            }
                            // We've been told to terminate
                            if vcpu_kill_signalled.load(Ordering::SeqCst)
                                || vcpu_kill.load(Ordering::SeqCst)
                            {
                                vcpu_run_interrupted.store(true, Ordering::SeqCst);
                                break;
                            }
                            // vcpu.run() returns false on a triple-fault so trigger a reset
                            match vcpu.lock().unwrap().run() {
                                Ok(run) => match run {
                                    #[cfg(target_arch = "x86_64")]
                                    VmExit::IoapicEoi(vector) => {
                                        if let Some(interrupt_controller) =
                                            &interrupt_controller_clone
                                        {
                                            interrupt_controller
                                                .lock()
                                                .unwrap()
                                                .end_of_interrupt(vector);
                                        }
                                    }
                                    VmExit::Ignore => {}
                                    VmExit::Hyperv => {}
                                    VmExit::Reset => {
                                        info!("VmExit::Reset");
                                        vcpu_run_interrupted.store(true, Ordering::SeqCst);
                                        reset_evt.write(1).unwrap();
                                        break;
                                    }
                                    VmExit::Shutdown => {
                                        info!("VmExit::Shutdown");
                                        vcpu_run_interrupted.store(true, Ordering::SeqCst);
                                        exit_evt.write(1).unwrap();
                                        break;
                                    }
                                    _ => {
                                        error!(
                                            "VCPU generated error: {:?}",
                                            Error::UnexpectedVmExit
                                        );
                                        break;
                                    }
                                },
                                Err(e) => {
                                    error!("VCPU generated error: {:?}", Error::VcpuRun(e.into()));
                                    break;
                                }
                            }
                            // We've been told to terminate
                            if vcpu_kill_signalled.load(Ordering::SeqCst)
                                || vcpu_kill.load(Ordering::SeqCst)
                            {
                                vcpu_run_interrupted.store(true, Ordering::SeqCst);
                                break;
                            }
                        }
                    })
                    .or_else(|_| {
                        // A panicking vCPU thread takes the whole VM down.
                        panic_vcpu_run_interrupted.store(true, Ordering::SeqCst);
                        error!("vCPU thread panicked");
                        panic_exit_evt.write(1)
                    })
                    .ok();
                })
                .map_err(Error::VcpuSpawn)?,
        );
        // On hot plug calls into this function entry_point is None. It is for
        // those hotplug CPU additions that we need to set the inserting flag.
        self.vcpu_states[usize::from(vcpu_id)].handle = handle;
        self.vcpu_states[usize::from(vcpu_id)].inserting = inserting;
        Ok(())
    }
    /// Start up as many vCPUs threads as needed to reach `desired_vcpus`
    ///
    /// The barrier is sized for the newly started threads plus this caller;
    /// all threads (and this function) block on it so the vCPUs start
    /// together.
    fn activate_vcpus(&mut self, desired_vcpus: u8, inserting: bool) -> Result<()> {
        if desired_vcpus > self.config.max_vcpus {
            return Err(Error::DesiredVCpuCountExceedsMax);
        }
        let vcpu_thread_barrier = Arc::new(Barrier::new(
            (desired_vcpus - self.present_vcpus() + 1) as usize,
        ));
        info!(
            "Starting vCPUs: desired = {}, allocated = {}, present = {}",
            desired_vcpus,
            self.vcpus.len(),
            self.present_vcpus()
        );
        // This reuses any inactive vCPUs as well as any that were newly created
        for vcpu_id in self.present_vcpus()..desired_vcpus {
            let vcpu = Arc::clone(&self.vcpus[vcpu_id as usize]);
            self.start_vcpu(vcpu, vcpu_id, vcpu_thread_barrier.clone(), inserting)?;
        }
        // Unblock all CPU threads.
        vcpu_thread_barrier.wait();
        Ok(())
    }
fn mark_vcpus_for_removal(&mut self, desired_vcpus: u8) {
// Mark vCPUs for removal, actual removal happens on ejection
for cpu_id in desired_vcpus..self.present_vcpus() {
self.vcpu_states[usize::from(cpu_id)].removing = true;
}
}
fn remove_vcpu(&mut self, cpu_id: u8) -> Result<()> {
info!("Removing vCPU: cpu_id = {}", cpu_id);
let mut state = &mut self.vcpu_states[usize::from(cpu_id)];
state.kill.store(true, Ordering::SeqCst);
state.signal_thread();
state.join_thread()?;
state.handle = None;
// Once the thread has exited, clear the "kill" so that it can reused
state.kill.store(false, Ordering::SeqCst);
Ok(())
}
    /// Creates (but does not start) the vCPUs the VM boots with.
    pub fn create_boot_vcpus(&mut self, entry_point: Option<EntryPoint>) -> Result<()> {
        self.create_vcpus(self.boot_vcpus(), entry_point)
    }
    // Starts all the vCPUs that the VM is booting with. Blocks until all vCPUs are running.
    pub fn start_boot_vcpus(&mut self) -> Result<()> {
        self.activate_vcpus(self.boot_vcpus(), false)
    }
    /// Starts every previously created vCPU thread in the paused state after
    /// a snapshot restore; a later resume unparks them.
    pub fn start_restored_vcpus(&mut self) -> Result<()> {
        let vcpu_numbers = self.vcpus.len() as u8;
        // Barrier sized for all vCPU threads plus this caller.
        let vcpu_thread_barrier = Arc::new(Barrier::new((vcpu_numbers + 1) as usize));
        // Restore the vCPUs in "paused" state.
        self.vcpus_pause_signalled.store(true, Ordering::SeqCst);
        for vcpu_id in 0..vcpu_numbers {
            let vcpu = Arc::clone(&self.vcpus[vcpu_id as usize]);
            self.start_vcpu(vcpu, vcpu_id, vcpu_thread_barrier.clone(), false)
                .map_err(|e| {
                    Error::StartRestoreVcpu(anyhow!("Failed to start restored vCPUs: {:#?}", e))
                })?;
        }
        // Unblock all restored CPU threads.
        vcpu_thread_barrier.wait();
        Ok(())
    }
pub fn resize(&mut self, desired_vcpus: u8) -> Result<bool> {
match desired_vcpus.cmp(&self.present_vcpus()) {
cmp::Ordering::Greater => {
self.create_vcpus(desired_vcpus, None)?;
self.activate_vcpus(desired_vcpus, true)?;
Ok(true)
}
cmp::Ordering::Less => {
self.mark_vcpus_for_removal(desired_vcpus);
Ok(true)
}
_ => Ok(false),
}
}
/// Shut down every vCPU thread and wait for all of them to exit.
///
/// The ordering matters: set the kill flag first, clear the pause flag and
/// unpark so paused threads can progress, then signal to break threads out
/// of KVM_RUN, and finally join.
pub fn shutdown(&mut self) -> Result<()> {
    // Tell the vCPUs to stop themselves next time they go through the loop
    self.vcpus_kill_signalled.store(true, Ordering::SeqCst);
    // Toggle the vCPUs pause boolean
    self.vcpus_pause_signalled.store(false, Ordering::SeqCst);
    // Unpark all the VCPU threads.
    for state in self.vcpu_states.iter() {
        state.unpark_thread();
    }
    // Signal to the spawned threads (vCPUs and console signal handler). For the vCPU threads
    // this will interrupt the KVM_RUN ioctl() allowing the loop to check the boolean set
    // above.
    for state in self.vcpu_states.iter() {
        state.signal_thread();
    }
    // Wait for all the threads to finish. This removes the state from the vector.
    for mut state in self.vcpu_states.drain(..) {
        state.join_thread()?;
    }
    Ok(())
}
/// Run TDX initialization (`tdx_init`) on every vCPU, passing the HOB
/// (Hand-Off Block) address. Stops at the first failing vCPU.
#[cfg(feature = "tdx")]
pub fn initialize_tdx(&self, hob_address: u64) -> Result<()> {
    self.vcpus.iter().try_for_each(|vcpu| {
        vcpu.lock()
            .unwrap()
            .vcpu
            .tdx_init(hob_address)
            .map_err(Error::InitializeTdx)
    })
}
/// Number of vCPUs the VM boots with.
pub fn boot_vcpus(&self) -> u8 {
    self.config.boot_vcpus
}
/// Maximum number of vCPUs the VM can ever have (hotplug ceiling).
pub fn max_vcpus(&self) -> u8 {
    self.config.max_vcpus
}
/// Returns a clone of the CPUID configuration shared by all vCPUs.
#[cfg(target_arch = "x86_64")]
pub fn common_cpuid(&self) -> CpuId {
    self.cpuid.clone()
}
/// Number of vCPUs whose thread state is currently active.
fn present_vcpus(&self) -> u8 {
    // vCPU counts are u8-bounded, so the cast cannot truncate in practice.
    self.vcpu_states
        .iter()
        .filter(|state| state.active())
        .count() as u8
}
/// Collect the MPIDR (Multiprocessor Affinity Register) value of every vCPU.
#[cfg(target_arch = "aarch64")]
pub fn get_mpidrs(&self) -> Vec<u64> {
    self.vcpus
        .iter()
        .map(|cpu| cpu.lock().unwrap().get_mpidr())
        .collect()
}
/// Collect the saved state of every vCPU.
///
/// NOTE(review): `get_saved_state().unwrap()` will panic if a vCPU has no
/// saved state yet — presumably only called after a pause/snapshot; confirm
/// against the callers.
#[cfg(target_arch = "aarch64")]
pub fn get_saved_states(&self) -> Vec<CpuState> {
    self.vcpus
        .iter()
        .map(|cpu| cpu.lock().unwrap().get_saved_state().unwrap())
        .collect()
}
/// Returns the configured CPU topology as
/// `(threads_per_core, cores_per_die, packages)`, or `None` when no
/// topology was configured.
#[cfg(target_arch = "aarch64")]
pub fn get_vcpu_topology(&self) -> Option<(u8, u8, u8)> {
    // Borrow instead of cloning the whole topology struct: the three tuple
    // fields are `Copy`, so no ownership of `topology` is needed.
    self.config
        .topology
        .as_ref()
        .map(|t| (t.threads_per_core, t.cores_per_die, t.packages))
}
/// Build the ACPI MADT (Multiple APIC Description Table).
///
/// On x86_64 this emits one Local APIC entry per possible vCPU (with the
/// enabled flag set only for boot vCPUs), an I/O APIC entry and an interrupt
/// source override. On aarch64 it emits GICC/GICD/GICR/ITS structures.
#[cfg(feature = "acpi")]
pub fn create_madt(&self) -> Sdt {
    use crate::acpi;
    // This is also checked in the commandline parsing.
    assert!(self.config.boot_vcpus <= self.config.max_vcpus);
    let mut madt = Sdt::new(*b"APIC", 44, 5, *b"CLOUDH", *b"CHMADT ", 1);
    #[cfg(target_arch = "x86_64")]
    {
        madt.write(36, arch::layout::APIC_START);
        for cpu in 0..self.config.max_vcpus {
            let lapic = LocalApic {
                r#type: acpi::ACPI_APIC_PROCESSOR,
                length: 8,
                processor_id: cpu,
                apic_id: cpu,
                // Only boot vCPUs are marked enabled; the remainder up to
                // max_vcpus are present-but-disabled hotplug slots.
                flags: if cpu < self.config.boot_vcpus {
                    1 << MADT_CPU_ENABLE_FLAG
                } else {
                    0
                },
            };
            madt.append(lapic);
        }
        madt.append(Ioapic {
            r#type: acpi::ACPI_APIC_IO,
            length: 12,
            ioapic_id: 0,
            apic_address: arch::layout::IOAPIC_START.0 as u32,
            gsi_base: 0,
            ..Default::default()
        });
        madt.append(InterruptSourceOverride {
            r#type: acpi::ACPI_APIC_XRUPT_OVERRIDE,
            length: 10,
            bus: 0,
            source: 4,
            gsi: 4,
            flags: 0,
        });
    }
    #[cfg(target_arch = "aarch64")]
    {
        /* Notes:
         * Ignore Local Interrupt Controller Address at byte offset 36 of MADT table.
         */
        // See section 5.2.12.14 GIC CPU Interface (GICC) Structure in ACPI spec.
        for cpu in 0..self.config.boot_vcpus {
            let vcpu = &self.vcpus[cpu as usize];
            let mpidr = vcpu.lock().unwrap().get_mpidr();
            /* ARMv8 MPIDR format:
            Bits [63:40] Must be zero
            Bits [39:32] Aff3 : Match Aff3 of target processor MPIDR
            Bits [31:24] Must be zero
            Bits [23:16] Aff2 : Match Aff2 of target processor MPIDR
            Bits [15:8] Aff1 : Match Aff1 of target processor MPIDR
            Bits [7:0] Aff0 : Match Aff0 of target processor MPIDR
            */
            // Mask keeps only the Aff0-Aff3 fields defined above.
            let mpidr_mask = 0xff_00ff_ffff;
            let gicc = GicC {
                r#type: acpi::ACPI_APIC_GENERIC_CPU_INTERFACE,
                length: 80,
                reserved0: 0,
                cpu_interface_number: cpu as u32,
                uid: cpu as u32,
                flags: 1,
                parking_version: 0,
                performance_interrupt: 0,
                parked_address: 0,
                base_address: 0,
                gicv_base_address: 0,
                gich_base_address: 0,
                vgic_interrupt: 0,
                gicr_base_address: 0,
                mpidr: mpidr & mpidr_mask,
                proc_power_effi_class: 0,
                reserved1: 0,
                spe_overflow_interrupt: 0,
            };
            madt.append(gicc);
        }
        // GIC Distributor structure. See section 5.2.12.15 in ACPI spec.
        let gicd = GicD {
            r#type: acpi::ACPI_APIC_GENERIC_DISTRIBUTOR,
            length: 24,
            reserved0: 0,
            gic_id: 0,
            base_address: arch::layout::MAPPED_IO_START - 0x0001_0000,
            global_irq_base: 0,
            version: 3,
            reserved1: [0; 3],
        };
        madt.append(gicd);
        // See 5.2.12.17 GIC Redistributor (GICR) Structure in ACPI spec.
        // Each redistributor frame is 2 * 64 KiB per vCPU.
        let gicr_size: u32 = 0x0001_0000 * 2 * (self.config.boot_vcpus as u32);
        let gicr_base: u64 = arch::layout::MAPPED_IO_START - 0x0001_0000 - gicr_size as u64;
        let gicr = GicR {
            r#type: acpi::ACPI_APIC_GENERIC_REDISTRIBUTOR,
            length: 16,
            reserved: 0,
            base_address: gicr_base,
            range_length: gicr_size,
        };
        madt.append(gicr);
        // See 5.2.12.18 GIC Interrupt Translation Service (ITS) Structure in ACPI spec.
        let gicits = GicIts {
            r#type: acpi::ACPI_APIC_GENERIC_TRANSLATOR,
            length: 20,
            reserved0: 0,
            translation_id: 0,
            base_address: gicr_base - 2 * 0x0001_0000,
            reserved1: 0,
        };
        madt.append(gicits);
        // NOTE(review): the checksum is finalized only on aarch64 here —
        // presumably the x86_64 path updates it elsewhere before the table is
        // written to guest memory; confirm with the table writer.
        madt.update_checksum();
    }
    madt
}
/// Build the ACPI PPTT (Processor Properties Topology Table) describing the
/// package / core / thread hierarchy of the boot vCPUs.
#[cfg(all(target_arch = "aarch64", feature = "acpi"))]
pub fn create_pptt(&self) -> Sdt {
    let pptt_start = 0;
    let mut cpus = 0;
    // Monotonically increasing ACPI processor UID assigned to leaf nodes.
    let mut uid = 0;
    // Fetch the topology once instead of three times; the fields are already
    // `u8`, so the previous `as u8` casts were redundant. Defaults to
    // (0, 0, 0) when no topology was configured.
    let (threads_per_core, cores_per_package, packages) =
        self.get_vcpu_topology().unwrap_or_default();
    let mut pptt = Sdt::new(*b"PPTT", 36, 2, *b"CLOUDH", *b"CHPPTT ", 1);
    for cluster_idx in 0..packages {
        // Stop emitting clusters once all boot vCPUs have been covered.
        if cpus < self.config.boot_vcpus as usize {
            let cluster_offset = pptt.len() - pptt_start;
            let cluster_hierarchy_node = ProcessorHierarchyNode {
                r#type: 0,
                length: 20,
                reserved: 0,
                // 0x2: node is a "physical package" container, not a leaf.
                flags: 0x2,
                parent: 0,
                acpi_processor_id: cluster_idx as u32,
                num_private_resources: 0,
            };
            pptt.append(cluster_hierarchy_node);
            for core_idx in 0..cores_per_package {
                let core_offset = pptt.len() - pptt_start;
                if threads_per_core > 1 {
                    // SMT: emit a core container node plus one leaf per thread.
                    let core_hierarchy_node = ProcessorHierarchyNode {
                        r#type: 0,
                        length: 20,
                        reserved: 0,
                        flags: 0x2,
                        parent: cluster_offset as u32,
                        acpi_processor_id: core_idx as u32,
                        num_private_resources: 0,
                    };
                    pptt.append(core_hierarchy_node);
                    for _thread_idx in 0..threads_per_core {
                        let thread_hierarchy_node = ProcessorHierarchyNode {
                            r#type: 0,
                            length: 20,
                            reserved: 0,
                            // 0xE: processor-is-thread + valid-UID + leaf flags.
                            flags: 0xE,
                            parent: core_offset as u32,
                            acpi_processor_id: uid as u32,
                            num_private_resources: 0,
                        };
                        pptt.append(thread_hierarchy_node);
                        uid += 1;
                    }
                } else {
                    // No SMT: the core itself is the leaf node.
                    let thread_hierarchy_node = ProcessorHierarchyNode {
                        r#type: 0,
                        length: 20,
                        reserved: 0,
                        flags: 0xA,
                        parent: cluster_offset as u32,
                        acpi_processor_id: uid as u32,
                        num_private_resources: 0,
                    };
                    pptt.append(thread_hierarchy_node);
                    uid += 1;
                }
            }
            cpus += (cores_per_package * threads_per_core) as usize;
        }
    }
    pptt.update_checksum();
    pptt
}
}
/// Per-CPU ACPI device description used when generating the DSDT AML.
#[cfg(feature = "acpi")]
struct Cpu {
    /// Zero-based processor identifier (matches the MADT processor id).
    cpu_id: u8,
    /// NUMA proximity domain reported to the guest via the `_PXM` method.
    proximity_domain: u32,
}
/// Bit position of the "enabled" flag in a MADT Local APIC entry's flags field.
#[cfg(all(target_arch = "x86_64", feature = "acpi"))]
const MADT_CPU_ENABLE_FLAG: usize = 0;
#[cfg(feature = "acpi")]
impl Cpu {
    /// Serialize the `_MAT` payload for this CPU: a MADT Local APIC entry
    /// with the enabled flag set, as raw bytes.
    #[cfg(target_arch = "x86_64")]
    fn generate_mat(&self) -> Vec<u8> {
        let lapic = LocalApic {
            r#type: 0,
            length: 8,
            processor_id: self.cpu_id,
            apic_id: self.cpu_id,
            flags: 1 << MADT_CPU_ENABLE_FLAG,
        };
        let mut mat_data: Vec<u8> = Vec::new();
        mat_data.resize(std::mem::size_of_val(&lapic), 0);
        // SAFETY-NOTE(review): writes `lapic` directly into the byte buffer,
        // which was just resized to exactly `size_of_val(&lapic)` bytes.
        // This assumes `LocalApic` tolerates an unaligned/raw write —
        // presumably it is `#[repr(packed)]`; confirm at its definition.
        unsafe { *(mat_data.as_mut_ptr() as *mut LocalApic) = lapic };
        mat_data
    }
}
#[cfg(feature = "acpi")]
impl Aml for Cpu {
    /// Emit the ACPI `Device` object for this CPU (named `Cxxx`), including
    /// `_HID`, `_UID`, `_PXM`, and — on x86_64 only — `_STA`, `_MAT` and `_EJ0`.
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        #[cfg(target_arch = "x86_64")]
        let mat_data: Vec<u8> = self.generate_mat();
        aml::Device::new(
            format!("C{:03}", self.cpu_id).as_str().into(),
            vec![
                // ACPI0007 is the standard processor device HID.
                &aml::Name::new("_HID".into(), &"ACPI0007"),
                &aml::Name::new("_UID".into(), &self.cpu_id),
                // Currently, AArch64 cannot support following fields.
                /*
                _STA return value:
                Bit [0] – Set if the device is present.
                Bit [1] – Set if the device is enabled and decoding its resources.
                Bit [2] – Set if the device should be shown in the UI.
                Bit [3] – Set if the device is functioning properly (cleared if device failed its diagnostics).
                Bit [4] – Set if the battery is present.
                Bits [31:5] – Reserved (must be cleared).
                */
                #[cfg(target_arch = "x86_64")]
                &aml::Method::new(
                    "_STA".into(),
                    0,
                    false,
                    // Call into CSTA method which will interrogate device
                    vec![&aml::Return::new(&aml::MethodCall::new(
                        "CSTA".into(),
                        vec![&self.cpu_id],
                    ))],
                ),
                &aml::Method::new(
                    "_PXM".into(),
                    0,
                    false,
                    vec![&aml::Return::new(&self.proximity_domain)],
                ),
                // The Linux kernel expects every CPU device to have a _MAT entry
                // containing the LAPIC for this processor with the enabled bit set
                // even if it is disabled in the MADT (non-boot CPU)
                #[cfg(target_arch = "x86_64")]
                &aml::Name::new("_MAT".into(), &aml::Buffer::new(mat_data)),
                // Trigger CPU ejection
                #[cfg(target_arch = "x86_64")]
                &aml::Method::new(
                    "_EJ0".into(),
                    1,
                    false,
                    // Call into CEJ0 method which will actually eject device
                    vec![&aml::MethodCall::new("CEJ0".into(), vec![&self.cpu_id])],
                ),
            ],
        )
        .append_aml_bytes(bytes)
    }
}
/// Helper that generates the AML `Notify` dispatch for one CPU, used inside
/// the `CTFY` method.
#[cfg(feature = "acpi")]
struct CpuNotify {
    /// Zero-based processor identifier to compare against Arg0.
    cpu_id: u8,
}
#[cfg(feature = "acpi")]
impl Aml for CpuNotify {
    /// Emit: `If (Arg0 == <cpu_id>) { Notify(C<cpu_id>, Arg1) }`.
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        let target = aml::Path::new(&format!("C{:03}", self.cpu_id));
        let matches_cpu = aml::Equal::new(&aml::Arg(0), &self.cpu_id);
        let notify = aml::Notify::new(&target, &aml::Arg(1));
        aml::If::new(&matches_cpu, vec![&notify]).append_aml_bytes(bytes)
    }
}
/// Generates the shared CPU hotplug AML methods (CSTA/CTFY/CEJ0/CSCN).
#[cfg(feature = "acpi")]
struct CpuMethods {
    /// Upper bound for the CSCN scan loop and per-CPU notify stubs.
    max_vcpus: u8,
}
#[cfg(feature = "acpi")]
impl Aml for CpuMethods {
    /// Emit the CPU hotplug helper methods:
    /// - `CSTA`: query a CPU's enabled status through the hotplug controller
    /// - `CTFY`: dispatch a Notify to the matching `Cxxx` device
    /// - `CEJ0`: trigger ejection of a CPU
    /// - `CSCN`: scan all CPU slots for pending insertions/removals
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        // CPU status method
        aml::Method::new(
            "CSTA".into(),
            1,
            true,
            vec![
                // Take lock defined above
                &aml::Acquire::new("\\_SB_.PRES.CPLK".into(), 0xffff),
                // Write CPU number (in first argument) to I/O port via field
                &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CSEL"), &aml::Arg(0)),
                &aml::Store::new(&aml::Local(0), &aml::ZERO),
                // Check if CPEN bit is set, if so make the local variable 0xf (see _STA for details of meaning)
                &aml::If::new(
                    &aml::Equal::new(&aml::Path::new("\\_SB_.PRES.CPEN"), &aml::ONE),
                    vec![&aml::Store::new(&aml::Local(0), &0xfu8)],
                ),
                // Release lock
                &aml::Release::new("\\_SB_.PRES.CPLK".into()),
                // Return 0 or 0xf
                &aml::Return::new(&aml::Local(0)),
            ],
        )
        .append_aml_bytes(bytes);
        // One per-CPU notify stub; CTFY chains them so the matching one fires.
        let mut cpu_notifies = Vec::new();
        for cpu_id in 0..self.max_vcpus {
            cpu_notifies.push(CpuNotify { cpu_id });
        }
        let mut cpu_notifies_refs: Vec<&dyn aml::Aml> = Vec::new();
        for cpu_id in 0..self.max_vcpus {
            cpu_notifies_refs.push(&cpu_notifies[usize::from(cpu_id)]);
        }
        aml::Method::new("CTFY".into(), 2, true, cpu_notifies_refs).append_aml_bytes(bytes);
        aml::Method::new(
            "CEJ0".into(),
            1,
            true,
            vec![
                &aml::Acquire::new("\\_SB_.PRES.CPLK".into(), 0xffff),
                // Write CPU number (in first argument) to I/O port via field
                &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CSEL"), &aml::Arg(0)),
                // Set CEJ0 bit
                &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CEJ0"), &aml::ONE),
                &aml::Release::new("\\_SB_.PRES.CPLK".into()),
            ],
        )
        .append_aml_bytes(bytes);
        aml::Method::new(
            "CSCN".into(),
            0,
            true,
            vec![
                // Take lock defined above
                &aml::Acquire::new("\\_SB_.PRES.CPLK".into(), 0xffff),
                &aml::Store::new(&aml::Local(0), &aml::ZERO),
                &aml::While::new(
                    &aml::LessThan::new(&aml::Local(0), &self.max_vcpus),
                    vec![
                        // Write CPU number (in first argument) to I/O port via field
                        &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CSEL"), &aml::Local(0)),
                        // Check if CINS bit is set
                        &aml::If::new(
                            &aml::Equal::new(&aml::Path::new("\\_SB_.PRES.CINS"), &aml::ONE),
                            // Notify device if it is
                            vec![
                                &aml::MethodCall::new(
                                    "CTFY".into(),
                                    vec![&aml::Local(0), &aml::ONE],
                                ),
                                // Reset CINS bit (write-one-to-clear semantics
                                // in the hotplug controller — confirm against
                                // the device model)
                                &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CINS"), &aml::ONE),
                            ],
                        ),
                        // Check if CRMV bit is set
                        &aml::If::new(
                            &aml::Equal::new(&aml::Path::new("\\_SB_.PRES.CRMV"), &aml::ONE),
                            // Notify device if it is (with the eject constant 0x3)
                            vec![
                                &aml::MethodCall::new("CTFY".into(), vec![&aml::Local(0), &3u8]),
                                // Reset CRMV bit (write-one-to-clear, as above)
                                &aml::Store::new(&aml::Path::new("\\_SB_.PRES.CRMV"), &aml::ONE),
                            ],
                        ),
                        &aml::Add::new(&aml::Local(0), &aml::Local(0), &aml::ONE),
                    ],
                ),
                // Release lock
                &aml::Release::new("\\_SB_.PRES.CPLK".into()),
            ],
        )
        .append_aml_bytes(bytes)
    }
}
#[cfg(feature = "acpi")]
impl Aml for CpuManager {
    /// Emit the whole CPU portion of the DSDT: the hotplug controller device
    /// (x86_64 only), the shared hotplug methods, and one device per vCPU.
    fn append_aml_bytes(&self, bytes: &mut Vec<u8>) {
        // CPU hotplug controller
        #[cfg(target_arch = "x86_64")]
        aml::Device::new(
            "_SB_.PRES".into(),
            vec![
                &aml::Name::new("_HID".into(), &aml::EisaName::new("PNP0A06")),
                &aml::Name::new("_UID".into(), &"CPU Hotplug Controller"),
                // Mutex to protect concurrent access as we write to choose CPU and then read back status
                &aml::Mutex::new("CPLK".into(), 0),
                &aml::Name::new(
                    "_CRS".into(),
                    &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
                        aml::AddressSpaceCachable::NotCacheable,
                        true,
                        self.acpi_address.0 as u64,
                        self.acpi_address.0 + CPU_MANAGER_ACPI_SIZE as u64 - 1,
                    )]),
                ),
                // OpRegion and Fields map MMIO range into individual field values
                &aml::OpRegion::new(
                    "PRST".into(),
                    aml::OpRegionSpace::SystemMemory,
                    self.acpi_address.0 as usize,
                    CPU_MANAGER_ACPI_SIZE,
                ),
                &aml::Field::new(
                    "PRST".into(),
                    aml::FieldAccessType::Byte,
                    aml::FieldUpdateRule::WriteAsZeroes,
                    vec![
                        aml::FieldEntry::Reserved(32),
                        aml::FieldEntry::Named(*b"CPEN", 1),
                        aml::FieldEntry::Named(*b"CINS", 1),
                        aml::FieldEntry::Named(*b"CRMV", 1),
                        aml::FieldEntry::Named(*b"CEJ0", 1),
                        aml::FieldEntry::Reserved(4),
                        aml::FieldEntry::Named(*b"CCMD", 8),
                    ],
                ),
                &aml::Field::new(
                    "PRST".into(),
                    aml::FieldAccessType::DWord,
                    aml::FieldUpdateRule::Preserve,
                    vec![
                        aml::FieldEntry::Named(*b"CSEL", 32),
                        aml::FieldEntry::Reserved(32),
                        aml::FieldEntry::Named(*b"CDAT", 32),
                    ],
                ),
            ],
        )
        .append_aml_bytes(bytes);
        // CPU devices
        let hid = aml::Name::new("_HID".into(), &"ACPI0010");
        // NOTE(review): despite the variable name, this emits `_CID`
        // (compatible id), not `_UID` — consider renaming the local.
        let uid = aml::Name::new("_CID".into(), &aml::EisaName::new("PNP0A05"));
        // Bundle methods together under a common object
        let methods = CpuMethods {
            max_vcpus: self.config.max_vcpus,
        };
        let mut cpu_data_inner: Vec<&dyn aml::Aml> = vec![&hid, &uid, &methods];
        let mut cpu_devices = Vec::new();
        for cpu_id in 0..self.config.max_vcpus {
            // Fall back to proximity domain 0 when no NUMA mapping exists.
            let proximity_domain = *self.proximity_domain_per_cpu.get(&cpu_id).unwrap_or(&0);
            let cpu_device = Cpu {
                cpu_id,
                proximity_domain,
            };
            cpu_devices.push(cpu_device);
        }
        for cpu_device in cpu_devices.iter() {
            cpu_data_inner.push(cpu_device);
        }
        aml::Device::new("_SB_.CPUS".into(), cpu_data_inner).append_aml_bytes(bytes)
    }
}
impl Pausable for CpuManager {
    /// Pause every vCPU: raise the pause flag, break threads out of KVM_RUN,
    /// and pause each vCPU object.
    fn pause(&mut self) -> std::result::Result<(), MigratableError> {
        // Tell the vCPUs to pause themselves next time they exit
        self.vcpus_pause_signalled.store(true, Ordering::SeqCst);
        // Signal to the spawned threads (vCPUs and console signal handler). For the vCPU threads
        // this will interrupt the KVM_RUN ioctl() allowing the loop to check the boolean set
        // above.
        for state in self.vcpu_states.iter() {
            state.signal_thread();
        }
        for vcpu in self.vcpus.iter() {
            let mut vcpu = vcpu.lock().unwrap();
            vcpu.pause()?;
            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
            if !self.config.kvm_hyperv {
                // Let the guest know its clock is paused so it can compensate
                // on resume (skipped with Hyper-V emulation enabled).
                vcpu.vcpu.notify_guest_clock_paused().map_err(|e| {
                    MigratableError::Pause(anyhow!(
                        "Could not notify guest it has been paused {:?}",
                        e
                    ))
                })?;
            }
        }
        Ok(())
    }
    /// Resume every vCPU: resume the vCPU objects, clear the pause flag,
    /// then unpark the threads so they leave their pause loop.
    fn resume(&mut self) -> std::result::Result<(), MigratableError> {
        for vcpu in self.vcpus.iter() {
            vcpu.lock().unwrap().resume()?;
        }
        // Toggle the vCPUs pause boolean
        self.vcpus_pause_signalled.store(false, Ordering::SeqCst);
        // Unpark all the VCPU threads.
        // Once unparked, the next thing they will do is checking for the pause
        // boolean. Since it'll be set to false, they will exit their pause loop
        // and go back to vmx root.
        for state in self.vcpu_states.iter() {
            state.unpark_thread();
        }
        Ok(())
    }
}
impl Snapshottable for CpuManager {
    fn id(&self) -> String {
        CPU_MANAGER_SNAPSHOT_ID.to_string()
    }
    /// Snapshot the CPU manager as the collection of all per-vCPU snapshots.
    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        let mut cpu_manager_snapshot = Snapshot::new(CPU_MANAGER_SNAPSHOT_ID);
        // The CpuManager snapshot is a collection of all vCPUs snapshots.
        for vcpu in &self.vcpus {
            let cpu_snapshot = vcpu.lock().unwrap().snapshot()?;
            cpu_manager_snapshot.add_snapshot(cpu_snapshot);
        }
        Ok(cpu_manager_snapshot)
    }
    /// Recreate each vCPU from its per-vCPU snapshot.
    ///
    /// NOTE(review): `cpu_id.parse::<u8>().unwrap()` panics on a malformed
    /// snapshot key — presumably keys are always produced by `snapshot()`
    /// above; confirm the snapshot format guarantees this.
    fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
        for (cpu_id, snapshot) in snapshot.snapshots.iter() {
            info!("Restoring VCPU {}", cpu_id);
            self.create_vcpu(cpu_id.parse::<u8>().unwrap(), None, Some(*snapshot.clone()))
                .map_err(|e| MigratableError::Restore(anyhow!("Could not create vCPU {:?}", e)))?;
        }
        Ok(())
    }
}
// CpuManager state travels via its vCPU snapshots; the default trait
// implementations are sufficient.
impl Transportable for CpuManager {}
impl Migratable for CpuManager {}
// These tests require a working /dev/kvm on an x86_64 host; they create real
// VM and vCPU file descriptors.
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
#[cfg(test)]
mod tests {
    use arch::x86_64::interrupts::*;
    use arch::x86_64::regs::*;
    use hypervisor::x86_64::{FpuState, LapicState, StandardRegisters};
    // Verify set_lint() switches LVT0/LVT1 to ExtINT/NMI delivery modes.
    #[test]
    fn test_setlint() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().expect("new VM fd creation failed");
        assert!(hv.check_required_extensions().is_ok());
        // Calling get_lapic will fail if there is no irqchip before hand.
        assert!(vm.create_irq_chip().is_ok());
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let klapic_before: LapicState = vcpu.get_lapic().unwrap();
        // Compute the value that is expected to represent LVT0 and LVT1.
        let lint0 = get_klapic_reg(&klapic_before, APIC_LVT0);
        let lint1 = get_klapic_reg(&klapic_before, APIC_LVT1);
        let lint0_mode_expected = set_apic_delivery_mode(lint0, APIC_MODE_EXTINT);
        let lint1_mode_expected = set_apic_delivery_mode(lint1, APIC_MODE_NMI);
        set_lint(&vcpu).unwrap();
        // Compute the value that represents LVT0 and LVT1 after set_lint.
        let klapic_actual: LapicState = vcpu.get_lapic().unwrap();
        let lint0_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT0);
        let lint1_mode_actual = get_klapic_reg(&klapic_actual, APIC_LVT1);
        assert_eq!(lint0_mode_expected, lint0_mode_actual);
        assert_eq!(lint1_mode_expected, lint1_mode_actual);
    }
    // Verify setup_fpu() programs the documented x87/SSE defaults.
    #[test]
    fn test_setup_fpu() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().expect("new VM fd creation failed");
        let vcpu = vm.create_vcpu(0, None).unwrap();
        setup_fpu(&vcpu).unwrap();
        let expected_fpu: FpuState = FpuState {
            fcw: 0x37f,
            mxcsr: 0x1f80,
            ..Default::default()
        };
        let actual_fpu: FpuState = vcpu.get_fpu().unwrap();
        // TODO: auto-generate kvm related structures with PartialEq on.
        assert_eq!(expected_fpu.fcw, actual_fpu.fcw);
        // Setting the mxcsr register from FpuState inside setup_fpu does not influence anything.
        // See 'kvm_arch_vcpu_ioctl_set_fpu' from arch/x86/kvm/x86.c.
        // The mxcsr will stay 0 and the assert below fails. Decide whether or not we should
        // remove it at all.
        // assert!(expected_fpu.mxcsr == actual_fpu.mxcsr);
    }
    // Verify setup_msrs() wrote the boot MSR set by reading one entry back.
    #[test]
    fn test_setup_msrs() {
        use hypervisor::arch::x86::msr_index;
        use hypervisor::x86_64::{MsrEntries, MsrEntry};
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().expect("new VM fd creation failed");
        let vcpu = vm.create_vcpu(0, None).unwrap();
        setup_msrs(&vcpu).unwrap();
        // This test will check against the last MSR entry configured (the tenth one).
        // See create_msr_entries for details.
        let mut msrs = MsrEntries::from_entries(&[MsrEntry {
            index: msr_index::MSR_IA32_MISC_ENABLE,
            ..Default::default()
        }])
        .unwrap();
        // get_msrs returns the number of msrs that it succeed in reading. We only want to read 1
        // in this test case scenario.
        let read_msrs = vcpu.get_msrs(&mut msrs).unwrap();
        assert_eq!(read_msrs, 1);
        // Official entries that were setup when we did setup_msrs. We need to assert that the
        // tenth one (i.e the one with index msr_index::MSR_IA32_MISC_ENABLE has the data we
        // expect.
        let entry_vec = hypervisor::x86_64::boot_msr_entries();
        assert_eq!(entry_vec.as_slice()[9], msrs.as_slice()[0]);
    }
    // Verify setup_regs() programs the PVH boot register state.
    #[test]
    fn test_setup_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().expect("new VM fd creation failed");
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let expected_regs: StandardRegisters = StandardRegisters {
            rflags: 0x0000000000000002u64,
            rbx: arch::layout::PVH_INFO_START.0,
            rip: 1,
            ..Default::default()
        };
        setup_regs(&vcpu, expected_regs.rip).unwrap();
        let actual_regs: StandardRegisters = vcpu.get_regs().unwrap();
        assert_eq!(actual_regs, expected_regs);
    }
}
// These tests require a working /dev/kvm on an aarch64 host; they create real
// VM and vCPU file descriptors.
#[cfg(target_arch = "aarch64")]
#[cfg(test)]
mod tests {
    use arch::aarch64::regs::*;
    use hypervisor::kvm::aarch64::{is_system_register, MPIDR_EL1};
    use hypervisor::kvm::kvm_bindings::{
        kvm_one_reg, kvm_regs, kvm_vcpu_init, user_pt_regs, KVM_REG_ARM64, KVM_REG_ARM64_SYSREG,
        KVM_REG_ARM_CORE, KVM_REG_SIZE_U64,
    };
    use hypervisor::{arm64_core_reg_id, offset__of};
    use std::mem;
    // setup_regs() must fail on an uninitialized vCPU and succeed after
    // vcpu_init().
    #[test]
    fn test_setup_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let res = setup_regs(&vcpu, 0, 0x0);
        // Must fail when vcpu is not initialized yet.
        assert!(res.is_err());
        let mut kvi: kvm_vcpu_init = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
        vcpu.vcpu_init(&kvi).unwrap();
        assert!(setup_regs(&vcpu, 0, 0x0).is_ok());
    }
    // vCPU 0's MPIDR affinity should be the architectural default 0x80000000.
    #[test]
    fn test_read_mpidr() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let mut kvi: kvm_vcpu_init = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
        // Must fail when vcpu is not initialized yet.
        assert!(vcpu.read_mpidr().is_err());
        vcpu.vcpu_init(&kvi).unwrap();
        assert_eq!(vcpu.read_mpidr().unwrap(), 0x80000000);
    }
    // Core registers are not system registers; a SYSREG-flagged id is.
    #[test]
    fn test_is_system_register() {
        let offset = offset__of!(user_pt_regs, pc);
        let regid = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset);
        assert!(!is_system_register(regid));
        let regid = KVM_REG_ARM64 as u64 | KVM_REG_SIZE_U64 as u64 | KVM_REG_ARM64_SYSREG as u64;
        assert!(is_system_register(regid));
    }
    // Round-trip the core register state and cross-check pstate through a
    // direct get_reg() read.
    #[test]
    fn test_save_restore_core_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let mut kvi: kvm_vcpu_init = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
        // Must fail when vcpu is not initialized yet.
        let mut state = kvm_regs::default();
        let res = vcpu.core_registers(&mut state);
        assert!(res.is_err());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Failed to get core register: Exec format error (os error 8)"
        );
        let res = vcpu.set_core_registers(&state);
        assert!(res.is_err());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Failed to set core register: Exec format error (os error 8)"
        );
        vcpu.vcpu_init(&kvi).unwrap();
        assert!(vcpu.core_registers(&mut state).is_ok());
        assert_eq!(state.regs.pstate, 0x3C5);
        assert!(vcpu.set_core_registers(&state).is_ok());
        let off = offset__of!(user_pt_regs, pstate);
        let pstate = vcpu
            .get_reg(arm64_core_reg_id!(KVM_REG_SIZE_U64, off))
            .expect("Failed to call kvm get one reg");
        assert_eq!(state.regs.pstate, pstate);
    }
    // Round-trip the system register list and verify MPIDR_EL1 is preserved.
    #[test]
    fn test_save_restore_system_regs() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let mut kvi: kvm_vcpu_init = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
        // Must fail when vcpu is not initialized yet.
        let mut state: Vec<kvm_one_reg> = Vec::new();
        let res = vcpu.system_registers(&mut state);
        assert!(res.is_err());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Failed to retrieve list of registers: Exec format error (os error 8)"
        );
        state.push(kvm_one_reg {
            id: MPIDR_EL1,
            addr: 0x00,
        });
        let res = vcpu.set_system_registers(&state);
        assert!(res.is_err());
        assert_eq!(
            format!("{}", res.unwrap_err()),
            "Failed to set system register: Exec format error (os error 8)"
        );
        vcpu.vcpu_init(&kvi).unwrap();
        assert!(vcpu.system_registers(&mut state).is_ok());
        let initial_mpidr: u64 = vcpu.read_mpidr().expect("Fail to read mpidr");
        assert!(state.contains(&kvm_one_reg {
            id: MPIDR_EL1,
            addr: initial_mpidr
        }));
        assert!(vcpu.set_system_registers(&state).is_ok());
        let mpidr: u64 = vcpu.read_mpidr().expect("Fail to read mpidr");
        assert_eq!(initial_mpidr, mpidr);
    }
    // get_mp_state/set_mp_state should round-trip without error.
    #[test]
    fn test_get_set_mpstate() {
        let hv = hypervisor::new().unwrap();
        let vm = hv.create_vm().unwrap();
        let vcpu = vm.create_vcpu(0, None).unwrap();
        let mut kvi: kvm_vcpu_init = kvm_vcpu_init::default();
        vm.get_preferred_target(&mut kvi).unwrap();
        let res = vcpu.get_mp_state();
        assert!(res.is_ok());
        assert!(vcpu.set_mp_state(res.unwrap()).is_ok());
    }
}
use crate::arithmetic;
use codec::{Codec, FullCodec, MaxEncodedLen};
pub use frame_support::{
traits::{BalanceStatus, LockIdentifier},
transactional,
};
use sp_runtime::{
traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize},
DispatchError, DispatchResult,
};
use sp_std::{
cmp::{Eq, Ordering, PartialEq},
fmt::Debug,
result,
};
/// Abstraction over a fungible multi-currency system.
pub trait MultiCurrency<AccountId> {
    /// The currency identifier.
    type CurrencyId: FullCodec
        + Eq
        + PartialEq
        + Copy
        + MaybeSerializeDeserialize
        + Debug
        + scale_info::TypeInfo
        + MaxEncodedLen;
    /// The balance of an account.
    type Balance: AtLeast32BitUnsigned
        + FullCodec
        + Copy
        + MaybeSerializeDeserialize
        + Debug
        + Default
        + scale_info::TypeInfo
        + MaxEncodedLen;
    // Public immutables
    /// Existential deposit of `currency_id`.
    fn minimum_balance(currency_id: Self::CurrencyId) -> Self::Balance;
    /// The total amount of issuance of `currency_id`.
    fn total_issuance(currency_id: Self::CurrencyId) -> Self::Balance;
    /// The combined balance of `who` under `currency_id`.
    fn total_balance(currency_id: Self::CurrencyId, who: &AccountId) -> Self::Balance;
    /// The free balance of `who` under `currency_id`.
    fn free_balance(currency_id: Self::CurrencyId, who: &AccountId) -> Self::Balance;
    /// A dry-run of `withdraw`. Returns `Ok` iff the account is able to make a
    /// withdrawal of the given amount.
    fn ensure_can_withdraw(currency_id: Self::CurrencyId, who: &AccountId, amount: Self::Balance) -> DispatchResult;
    // Public mutables
    /// Transfer some amount from one account to another.
    fn transfer(
        currency_id: Self::CurrencyId,
        from: &AccountId,
        to: &AccountId,
        amount: Self::Balance,
    ) -> DispatchResult;
    /// Add `amount` to the balance of `who` under `currency_id` and increase
    /// total issuance.
    fn deposit(currency_id: Self::CurrencyId, who: &AccountId, amount: Self::Balance) -> DispatchResult;
    /// Remove `amount` from the balance of `who` under `currency_id` and reduce
    /// total issuance.
    fn withdraw(currency_id: Self::CurrencyId, who: &AccountId, amount: Self::Balance) -> DispatchResult;
    /// Same result as `slash(currency_id, who, value)` (but without the
    /// side-effects) assuming there are no balance changes in the meantime and
    /// only the reserved balance is not taken into account.
    fn can_slash(currency_id: Self::CurrencyId, who: &AccountId, value: Self::Balance) -> bool;
    /// Deduct the balance of `who` by up to `amount`.
    ///
    /// As much funds up to `amount` will be deducted as possible. If this is
    /// less than `amount`, then a non-zero value will be returned.
    fn slash(currency_id: Self::CurrencyId, who: &AccountId, amount: Self::Balance) -> Self::Balance;
}
/// Extended `MultiCurrency` with additional helper types and methods.
pub trait MultiCurrencyExtended<AccountId>: MultiCurrency<AccountId> {
    /// The type for balance related operations, typically signed int.
    type Amount: arithmetic::Signed
        + TryInto<Self::Balance>
        + TryFrom<Self::Balance>
        + arithmetic::SimpleArithmetic
        + Codec
        + Copy
        + MaybeSerializeDeserialize
        + Debug
        + Default
        + scale_info::TypeInfo
        + MaxEncodedLen;
    /// Add or remove abs(`by_amount`) from the balance of `who` under
    /// `currency_id`. If positive `by_amount`, do add, else do remove.
    fn update_balance(currency_id: Self::CurrencyId, who: &AccountId, by_amount: Self::Amount) -> DispatchResult;
}
/// A fungible multi-currency system whose accounts can have liquidity
/// restrictions.
pub trait MultiLockableCurrency<AccountId>: MultiCurrency<AccountId> {
    /// The quantity used to denote time; usually just a `BlockNumber`.
    type Moment;
    /// Create a new balance lock on account `who`.
    ///
    /// If the new lock is valid (i.e. not already expired), it will push the
    /// struct to the `Locks` vec in storage. Note that you can lock more funds
    /// than a user has.
    ///
    /// If the lock `lock_id` already exists, this will update it.
    fn set_lock(
        lock_id: LockIdentifier,
        currency_id: Self::CurrencyId,
        who: &AccountId,
        amount: Self::Balance,
    ) -> DispatchResult;
    /// Changes a balance lock (selected by `lock_id`) so that it becomes less
    /// liquid in all parameters or creates a new one if it does not exist.
    ///
    /// Calling `extend_lock` on an existing lock `lock_id` differs from
    /// `set_lock` in that it applies the most severe constraints of the two,
    /// while `set_lock` replaces the lock with the new parameters. As in,
    /// `extend_lock` will set:
    /// - maximum `amount`
    fn extend_lock(
        lock_id: LockIdentifier,
        currency_id: Self::CurrencyId,
        who: &AccountId,
        amount: Self::Balance,
    ) -> DispatchResult;
    /// Remove an existing lock.
    fn remove_lock(lock_id: LockIdentifier, currency_id: Self::CurrencyId, who: &AccountId) -> DispatchResult;
}
/// A fungible multi-currency system where funds can be reserved from the user.
pub trait MultiReservableCurrency<AccountId>: MultiCurrency<AccountId> {
    /// Same result as `reserve(who, value)` (but without the side-effects)
    /// assuming there are no balance changes in the meantime.
    fn can_reserve(currency_id: Self::CurrencyId, who: &AccountId, value: Self::Balance) -> bool;
    /// Deducts up to `value` from reserved balance of `who`. This function
    /// cannot fail.
    ///
    /// As much funds up to `value` will be deducted as possible. If the reserve
    /// balance of `who` is less than `value`, then a non-zero second item will
    /// be returned.
    fn slash_reserved(currency_id: Self::CurrencyId, who: &AccountId, value: Self::Balance) -> Self::Balance;
    /// The amount of the balance of a given account that is externally
    /// reserved; this can still get slashed, but gets slashed last of all.
    ///
    /// This balance is a 'reserve' balance that other subsystems use in order
    /// to set aside tokens that are still 'owned' by the account holder, but
    /// which are suspendable.
    fn reserved_balance(currency_id: Self::CurrencyId, who: &AccountId) -> Self::Balance;
    /// Moves `value` from balance to reserved balance.
    ///
    /// If the free balance is lower than `value`, then no funds will be moved
    /// and an `Err` will be returned to notify of this. This is different
    /// behavior than `unreserve`.
    fn reserve(currency_id: Self::CurrencyId, who: &AccountId, value: Self::Balance) -> DispatchResult;
    /// Moves up to `value` from reserved balance to free balance. This function
    /// cannot fail.
    ///
    /// As much funds up to `value` will be moved as possible. If the reserve
    /// balance of `who` is less than `value`, then the remaining amount will be
    /// returned.
    ///
    /// # NOTES
    ///
    /// - This is different from `reserve`.
    fn unreserve(currency_id: Self::CurrencyId, who: &AccountId, value: Self::Balance) -> Self::Balance;
    /// Moves up to `value` from reserved balance of account `slashed` to
    /// balance of account `beneficiary`. `beneficiary` must exist for this to
    /// succeed. If it does not, `Err` will be returned. Funds will be placed in
    /// either the `free` balance or the `reserved` balance, depending on the
    /// `status`.
    ///
    /// As much funds up to `value` will be deducted as possible. If this is
    /// less than `value`, then `Ok(non_zero)` will be returned.
    fn repatriate_reserved(
        currency_id: Self::CurrencyId,
        slashed: &AccountId,
        beneficiary: &AccountId,
        value: Self::Balance,
        status: BalanceStatus,
    ) -> result::Result<Self::Balance, DispatchError>;
}
/// A fungible multi-currency system where funds can be reserved from the user
/// under a caller-supplied identifier, so that independent subsystems can each
/// keep track of their own reserve on the same account.
pub trait NamedMultiReservableCurrency<AccountId>: MultiReservableCurrency<AccountId> {
	/// An identifier for a reserve. Used for disambiguating different reserves
	/// so that they can be individually replaced or removed.
	type ReserveIdentifier;

	/// Deducts up to `value` from the reserve named `id` of `who`. This
	/// function cannot fail.
	///
	/// As much of `value` as is available will be deducted; the portion that
	/// could not be slashed (non-zero when the named reserve of `who` holds
	/// less than `value`) is returned.
	fn slash_reserved_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
		value: Self::Balance,
	) -> Self::Balance;

	/// The amount of the balance of `who` held in the reserve named `id`;
	/// this can still get slashed, but gets slashed last of all.
	///
	/// This balance is a 'reserve' balance that other subsystems use in order
	/// to set aside tokens that are still 'owned' by the account holder, but
	/// which are suspendable.
	///
	/// When this balance falls below the value of `ExistentialDeposit`, then
	/// this 'reserve account' is deleted: specifically, `ReservedBalance`.
	///
	/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero
	/// (it also gets collapsed to zero if it ever becomes less than
	/// `ExistentialDeposit`).
	fn reserved_balance_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
	) -> Self::Balance;

	/// Moves `value` from free balance into the reserve named `id`.
	///
	/// If the free balance is lower than `value`, no funds are moved and an
	/// `Err` is returned to notify of this. This is different behavior than
	/// `unreserve`.
	fn reserve_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
		value: Self::Balance,
	) -> DispatchResult;

	/// Moves up to `value` from the reserve named `id` back to free balance.
	/// This function cannot fail.
	///
	/// As much of `value` as is available will be moved; if the named reserve
	/// of `who` holds less than `value`, the remaining amount is returned.
	///
	/// # NOTES
	///
	/// - This is different from `reserve`.
	/// - If the remaining reserved balance is less than `ExistentialDeposit`,
	///   it will invoke `on_reserved_too_low` and could reap the account.
	fn unreserve_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
		value: Self::Balance,
	) -> Self::Balance;

	/// Moves up to `value` from the reserve named `id` of account `slashed`
	/// to the balance of account `beneficiary`. `beneficiary` must exist for
	/// this to succeed; if it does not, `Err` is returned. Funds are placed
	/// in either the `free` balance or the `reserved` balance, depending on
	/// the `status`.
	///
	/// As much of `value` as is available will be moved; if this is less than
	/// `value`, then `Ok(non_zero)` is returned.
	fn repatriate_reserved_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		slashed: &AccountId,
		beneficiary: &AccountId,
		value: Self::Balance,
		status: BalanceStatus,
	) -> result::Result<Self::Balance, DispatchError>;

	/// Ensure the reserve named `id` holds exactly `value`: reserves more if
	/// the current named reserve is below `value`, and unreserves the surplus
	/// if it is above.
	fn ensure_reserved_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
		value: Self::Balance,
	) -> DispatchResult {
		let reserved = Self::reserved_balance_named(id, currency_id, who);
		if reserved < value {
			// `value - reserved` cannot underflow because `reserved < value`.
			Self::reserve_named(id, currency_id, who, value - reserved)
		} else if reserved > value {
			// The surplus is fully covered by the named reserve, so the
			// requested amount can always be unreserved here.
			Self::unreserve_named(id, currency_id, who, reserved - value);
			Ok(())
		} else {
			Ok(())
		}
	}

	/// Unreserve the entire reserve named `id`, returning the amount that was
	/// unreserved.
	///
	/// Is a no-op if the value to be unreserved is zero.
	fn unreserve_all_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
	) -> Self::Balance {
		let amount = Self::reserved_balance_named(id, currency_id, who);
		Self::unreserve_named(id, currency_id, who, amount);
		amount
	}

	/// Slash the entire reserve named `id`, returning the amount that was
	/// unable to be slashed.
	///
	/// Is a no-op if the value to be slashed is zero.
	fn slash_all_reserved_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		who: &AccountId,
	) -> Self::Balance {
		let amount = Self::reserved_balance_named(id, currency_id, who);
		Self::slash_reserved_named(id, currency_id, who, amount)
	}

	/// Move the entire reserve named `id` of one account into the balance of
	/// another, according to `status`. If `status` is `Reserved`, the balance
	/// will be reserved under the same `id`.
	///
	/// Is a no-op if:
	/// - the value to be moved is zero; or
	/// - the `slashed` id equal to `beneficiary` and the `status` is
	///   `Reserved`.
	fn repatriate_all_reserved_named(
		id: &Self::ReserveIdentifier,
		currency_id: Self::CurrencyId,
		slashed: &AccountId,
		beneficiary: &AccountId,
		status: BalanceStatus,
	) -> DispatchResult {
		let amount = Self::reserved_balance_named(id, currency_id, slashed);
		Self::repatriate_reserved_named(id, currency_id, slashed, beneficiary, amount, status)?;
		Ok(())
	}
}
/// Abstraction over a fungible (single) currency system.
pub trait BasicCurrency<AccountId> {
	/// The balance of an account.
	type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + MaxEncodedLen;
	// Public immutables
	/// Existential deposit: the minimum balance required for an account.
	fn minimum_balance() -> Self::Balance;
	/// The total amount of issuance.
	fn total_issuance() -> Self::Balance;
	/// The combined balance of `who`.
	fn total_balance(who: &AccountId) -> Self::Balance;
	/// The free balance of `who`.
	fn free_balance(who: &AccountId) -> Self::Balance;
	/// A dry-run of `withdraw`. Returns `Ok` iff the account is able to make a
	/// withdrawal of the given amount.
	fn ensure_can_withdraw(who: &AccountId, amount: Self::Balance) -> DispatchResult;
	// Public mutables
	/// Transfer some amount from one account to another.
	fn transfer(from: &AccountId, to: &AccountId, amount: Self::Balance) -> DispatchResult;
	/// Add `amount` to the balance of `who` and increase total issuance.
	fn deposit(who: &AccountId, amount: Self::Balance) -> DispatchResult;
	/// Remove `amount` from the balance of `who` and reduce total issuance.
	fn withdraw(who: &AccountId, amount: Self::Balance) -> DispatchResult;
	/// Same result as `slash(who, value)` (but without the side-effects)
	/// assuming there are no balance changes in the meantime and only the
	/// reserved balance is not taken into account.
	fn can_slash(who: &AccountId, value: Self::Balance) -> bool;
	/// Deduct the balance of `who` by up to `amount`.
	///
	/// As much funds up to `amount` will be deducted as possible. If this is
	/// less than `amount`, then a non-zero value will be returned.
	fn slash(who: &AccountId, amount: Self::Balance) -> Self::Balance;
}
/// Extended `BasicCurrency` with additional helper types and methods.
pub trait BasicCurrencyExtended<AccountId>: BasicCurrency<AccountId> {
	/// The signed type for balance related operations, typically signed int.
	/// Convertible to and from the unsigned `Balance` type (conversions may
	/// fail, hence `TryInto`/`TryFrom`).
	type Amount: arithmetic::Signed
		+ TryInto<Self::Balance>
		+ TryFrom<Self::Balance>
		+ arithmetic::SimpleArithmetic
		+ Codec
		+ Copy
		+ MaybeSerializeDeserialize
		+ Debug
		+ Default
		+ MaxEncodedLen;
	/// Add or remove abs(`by_amount`) from the balance of `who`. If positive
	/// `by_amount`, do add, else do remove.
	fn update_balance(who: &AccountId, by_amount: Self::Amount) -> DispatchResult;
}
/// A fungible single currency system whose accounts can have liquidity
/// restrictions.
pub trait BasicLockableCurrency<AccountId>: BasicCurrency<AccountId> {
	/// The quantity used to denote time; usually just a `BlockNumber`.
	type Moment;
	/// Create a new balance lock on account `who`.
	///
	/// If the new lock is valid (i.e. not already expired), it will push the
	/// struct to the `Locks` vec in storage. Note that you can lock more funds
	/// than a user has.
	///
	/// If the lock `lock_id` already exists, this will update it.
	fn set_lock(lock_id: LockIdentifier, who: &AccountId, amount: Self::Balance) -> DispatchResult;
	/// Changes a balance lock (selected by `lock_id`) so that it becomes less
	/// liquid in all parameters or creates a new one if it does not exist.
	///
	/// Calling `extend_lock` on an existing lock `lock_id` differs from
	/// `set_lock` in that it applies the most severe constraints of the two,
	/// while `set_lock` replaces the lock with the new parameters. As in,
	/// `extend_lock` will set:
	/// - maximum `amount`
	fn extend_lock(lock_id: LockIdentifier, who: &AccountId, amount: Self::Balance) -> DispatchResult;
	/// Remove an existing lock identified by `lock_id` from account `who`.
	fn remove_lock(lock_id: LockIdentifier, who: &AccountId) -> DispatchResult;
}
/// A fungible single currency system where funds can be reserved from the user.
pub trait BasicReservableCurrency<AccountId>: BasicCurrency<AccountId> {
	/// Same result as `reserve(who, value)` (but without the side-effects)
	/// assuming there are no balance changes in the meantime.
	fn can_reserve(who: &AccountId, value: Self::Balance) -> bool;
	/// Deducts up to `value` from reserved balance of `who`. This function
	/// cannot fail.
	///
	/// As much funds up to `value` will be deducted as possible. If the reserve
	/// balance of `who` is less than `value`, then a non-zero value will
	/// be returned.
	fn slash_reserved(who: &AccountId, value: Self::Balance) -> Self::Balance;
	/// The amount of the balance of a given account that is externally
	/// reserved; this can still get slashed, but gets slashed last of all.
	///
	/// This balance is a 'reserve' balance that other subsystems use in order
	/// to set aside tokens that are still 'owned' by the account holder, but
	/// which are suspendable.
	fn reserved_balance(who: &AccountId) -> Self::Balance;
	/// Moves `value` from balance to reserved balance.
	///
	/// If the free balance is lower than `value`, then no funds will be moved
	/// and an `Err` will be returned to notify of this. This is different
	/// behavior than `unreserve`.
	fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult;
	/// Moves up to `value` from reserved balance to free balance. This function
	/// cannot fail.
	///
	/// As much funds up to `value` will be moved as possible. If the reserve
	/// balance of `who` is less than `value`, then the remaining amount will be
	/// returned.
	///
	/// # NOTES
	///
	/// - This is different from `reserve`.
	fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance;
	/// Moves up to `value` from reserved balance of account `slashed` to
	/// balance of account `beneficiary`. `beneficiary` must exist for this to
	/// succeed. If it does not, `Err` will be returned. Funds will be placed in
	/// either the `free` balance or the `reserved` balance, depending on the
	/// `status`.
	///
	/// As much funds up to `value` will be deducted as possible. If this is
	/// less than `value`, then `Ok(non_zero)` will be returned.
	fn repatriate_reserved(
		slashed: &AccountId,
		beneficiary: &AccountId,
		value: Self::Balance,
		status: BalanceStatus,
	) -> result::Result<Self::Balance, DispatchError>;
}
/// Handler for an account that holds dust; implementations decide whether to
/// burn or recycle it.
pub trait OnDust<AccountId, CurrencyId, Balance> {
	/// Called with the dusted account, the currency, and the dust amount.
	fn on_dust(who: &AccountId, currency_id: CurrencyId, amount: Balance);
}
/// Default no-op handler: dust is simply ignored.
impl<AccountId, CurrencyId, Balance> OnDust<AccountId, CurrencyId, Balance> for () {
	fn on_dust(_: &AccountId, _: CurrencyId, _: Balance) {}
}
/// Abstraction over moving all remaining funds from one account to another.
pub trait TransferAll<AccountId> {
	/// Transfer everything held by `source` into `dest`.
	fn transfer_all(source: &AccountId, dest: &AccountId) -> DispatchResult;
}
// Implements `TransferAll` for tuples of up to 5 implementors, invoking each
// element's `transfer_all` in declaration order.
#[impl_trait_for_tuples::impl_for_tuples(5)]
impl<AccountId> TransferAll<AccountId> for Tuple {
	// NOTE(review): `#[transactional]` presumably reverts the transfers of
	// earlier tuple elements if a later one fails — confirm against the
	// `frame_support` transactional docs.
	#[transactional]
	fn transfer_all(source: &AccountId, dest: &AccountId) -> DispatchResult {
		// Short-circuits on the first element that returns an error.
		for_tuples!( #( {
			Tuple::transfer_all(source, dest)?;
		} )* );
		Ok(())
	}
}
| 37.01487 | 117 | 0.709451 |
299202981b1f3ad2b72e161eb737f507a0d15455 | 4,738 | use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::{walk_expr, NestedVisitorMap, Visitor};
use rustc_hir::{Block, Expr, ExprKind, PatKind, StmtKind};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use crate::utils::{in_macro, match_qpath, snippet_opt, span_lint_and_then};
declare_clippy_lint! {
    /// **What it does:** Checks for `let`-bindings, which are subsequently
    /// returned.
    ///
    /// **Why is this bad?** It is just extraneous code. Remove it to make your code
    /// more rusty.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// fn foo() -> String {
    ///     let x = String::new();
    ///     x
    /// }
    /// ```
    /// instead, use
    /// ```rust
    /// fn foo() -> String {
    ///     String::new()
    /// }
    /// ```
    pub LET_AND_RETURN,
    style,
    "creating a let-binding and then immediately returning it like `let x = expr; x` at the end of a block"
}
declare_lint_pass!(LetReturn => [LET_AND_RETURN]);
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for LetReturn {
    /// Lint blocks of the shape `{ ...; let <ident> = <init>; <ident> }` and
    /// suggest returning `<init>` directly.
    fn check_block(&mut self, cx: &LateContext<'a, 'tcx>, block: &'tcx Block<'_>) {
        // we need both a let-binding stmt and an expr
        if_chain! {
            if let Some(retexpr) = block.expr;
            // `stmts` is a slice, so ask for the last element directly
            // instead of going through the redundant `iter().last()`.
            if let Some(stmt) = block.stmts.last();
            if let StmtKind::Local(local) = &stmt.kind;
            if local.ty.is_none();
            if local.attrs.is_empty();
            if let Some(initexpr) = &local.init;
            if let PatKind::Binding(.., ident, _) = local.pat.kind;
            if let ExprKind::Path(qpath) = &retexpr.kind;
            if match_qpath(qpath, &[&*ident.name.as_str()]);
            // Skip when the initializer calls something whose return type
            // carries lifetimes: removing the binding could alter how long
            // temporaries live (see `last_statement_borrows`).
            if !last_statement_borrows(cx, initexpr);
            if !in_external_macro(cx.sess(), initexpr.span);
            if !in_external_macro(cx.sess(), retexpr.span);
            if !in_external_macro(cx.sess(), local.span);
            if !in_macro(local.span);
            then {
                span_lint_and_then(
                    cx,
                    LET_AND_RETURN,
                    retexpr.span,
                    "returning the result of a `let` binding from a block",
                    |err| {
                        err.span_label(local.span, "unnecessary `let` binding");
                        if let Some(snippet) = snippet_opt(cx, initexpr.span) {
                            err.multipart_suggestion(
                                "return the expression directly",
                                vec![
                                    (local.span, String::new()),
                                    (retexpr.span, snippet),
                                ],
                                Applicability::MachineApplicable,
                            );
                        } else {
                            // No snippet available (e.g. weird spans): fall
                            // back to a non-applicable help message.
                            err.span_help(initexpr.span, "this expression can be directly returned");
                        }
                    },
                );
            }
        }
    }
}
/// Returns `true` if `expr` (or anything inside it) calls a function whose
/// return type contains a lifetime, i.e. the result may borrow.
fn last_statement_borrows<'tcx>(cx: &LateContext<'_, 'tcx>, expr: &'tcx Expr<'tcx>) -> bool {
    let mut borrow_search = BorrowVisitor { cx, borrows: false };
    walk_expr(&mut borrow_search, expr);
    borrow_search.borrows
}
/// HIR visitor that searches an expression tree for calls returning borrowed
/// data (function signatures whose output type mentions a lifetime).
struct BorrowVisitor<'a, 'tcx> {
    cx: &'a LateContext<'a, 'tcx>,
    // Set to `true` once any visited call's return type contains a lifetime.
    borrows: bool,
}
impl BorrowVisitor<'_, '_> {
    /// Resolve the `DefId` of the function or method invoked by `expr`, if
    /// `expr` is a method call or a call through a path; `None` otherwise.
    fn fn_def_id(&self, expr: &Expr<'_>) -> Option<DefId> {
        match &expr.kind {
            ExprKind::MethodCall(..) => self.cx.tables().type_dependent_def_id(expr.hir_id),
            ExprKind::Call(callee, _) => {
                if let ExprKind::Path(qpath) = &callee.kind {
                    self.cx.tables().qpath_res(qpath, expr.hir_id).opt_def_id()
                } else {
                    None
                }
            },
            _ => None,
        }
    }
}
impl<'tcx> Visitor<'tcx> for BorrowVisitor<'_, 'tcx> {
    type Map = Map<'tcx>;

    fn visit_expr(&mut self, expr: &'tcx Expr<'_>) {
        // Once a borrow has been found there is nothing left to learn.
        if self.borrows {
            return;
        }

        if let Some(def_id) = self.fn_def_id(expr) {
            // Flag the visitor when the callee's return type mentions any
            // lifetime parameter.
            let output = self.cx.tcx.fn_sig(def_id).output();
            self.borrows = output
                .skip_binder()
                .walk()
                .any(|arg| matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
        }

        walk_expr(self, expr);
    }

    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }
}
| 33.366197 | 107 | 0.508231 |
482876cac8c267ed7b4b1e572c0a97b33c887855 | 1,105 | use crate::route::Route;
use yew::prelude::*;
use yew_router::components::RouterAnchor;
pub struct Nav {}
impl Component for Nav {
    // Stateless component: no messages and no properties are used.
    type Message = ();
    type Properties = ();
    fn create(_: Self::Properties, _link: ComponentLink<Self>) -> Self {
        Self {}
    }
    // Always request a re-render; there is no internal state to compare.
    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }
    fn change(&mut self, _: Self::Properties) -> ShouldRender {
        true
    }
    // Renders the nav bar with router anchors for the Home and About routes.
    fn view(&self) -> Html {
        type Anchor = RouterAnchor<Route>;
        html! {
            <nav class="nav">
                <div class="nav-container">
                    <div class="nav-inner-ctn">
                        <Anchor route=Route::HomePage classes="nav_home_button">
                            <div class="nav_title">{"Home"}</div>
                        </Anchor>
                        <Anchor route=Route::About classes="nav_home_button">
                            <div class="nav_title">{"About"}</div>
                        </Anchor>
                    </div>
                </div>
            </nav>
        }
    }
}
| 26.309524 | 80 | 0.471493 |
b9587e7a75ba780d5bbbf668a4c2facf7b36a9f8 | 3,379 | #![feature(test)]
pub mod adapter;
pub mod consensus;
mod engine;
pub mod fixed_types;
pub mod message;
pub mod status;
pub mod synchronization;
#[cfg(test)]
mod tests;
pub mod trace;
pub mod util;
pub mod wal;
mod wal_proto;
pub use crate::adapter::OverlordConsensusAdapter;
pub use crate::consensus::OverlordConsensus;
pub use crate::synchronization::{OverlordSynchronization, RichBlock};
pub use crate::wal::SignedTxsWAL;
pub use overlord::{types::Node, DurationConfig};
use std::error::Error;
use derive_more::Display;
use common_crypto::Error as CryptoError;
use protocol::types::Hash;
use protocol::{ProtocolError, ProtocolErrorKind};
/// The kinds of consensus messages; used to tag [`ConsensusError`] variants
/// with the message type that failed.
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub enum ConsensusType {
    #[display(fmt = "Signed Proposal")]
    SignedProposal,
    #[display(fmt = "Signed Vote")]
    SignedVote,
    #[display(fmt = "Aggregated Vote")]
    AggregateVote,
    #[display(fmt = "Rich Height")]
    RichHeight,
    #[display(fmt = "Rpc Pull Blocks")]
    RpcPullBlocks,
    #[display(fmt = "Rpc Pull Transactions")]
    RpcPullTxs,
    #[display(fmt = "Signed Choke")]
    SignedChoke,
    #[display(fmt = "WAL Signed Transactions")]
    WALSignedTxs,
}
/// Consensus errors defined here.
#[derive(Debug, Display)]
pub enum ConsensusError {
    /// Send consensus message error.
    #[display(fmt = "Send {:?} message failed", _0)]
    SendMsgErr(ConsensusType),
    /// Check block error.
    #[display(fmt = "Check invalid prev_hash, expect {:?} get {:?}", expect, actual)]
    InvalidPrevhash { expect: Hash, actual: Hash },
    /// Check status vector error.
    #[display(fmt = "Check invalid status vec")]
    InvalidStatusVec,
    /// Decode consensus message error.
    #[display(fmt = "Decode {:?} message failed", _0)]
    DecodeErr(ConsensusType),
    /// Encode consensus message error.
    #[display(fmt = "Encode {:?} message failed", _0)]
    EncodeErr(ConsensusType),
    /// Overlord consensus protocol error.
    #[display(fmt = "Overlord error {:?}", _0)]
    OverlordErr(Box<dyn Error + Send>),
    /// Consensus missed last block proof.
    #[display(fmt = "Consensus missed proof of {} block", _0)]
    MissingProof(u64),
    /// Consensus missed the pill.
    #[display(fmt = "Consensus missed pill cooresponding {:?}", _0)]
    MissingPill(Hash),
    /// Consensus missed the block header.
    #[display(fmt = "Consensus missed block header of {} block", _0)]
    MissingBlockHeader(u64),
    /// This boxed error should be a `CryptoError`.
    #[display(fmt = "Crypto error {:?}", _0)]
    CryptoErr(Box<CryptoError>),
    /// The synchronous block does not pass the checks.
    #[display(fmt = "Synchronization {} block error", _0)]
    SyncBlockHashErr(u64),
    /// The Rpc response mismatch the request.
    #[display(fmt = "Synchronization Rpc {:?} message mismatch", _0)]
    RpcErr(ConsensusType),
    /// Computing a merkle root failed.
    #[display(fmt = "Get merkle root failed {:?}", _0)]
    MerkleErr(String),
    /// Executing transactions failed.
    #[display(fmt = "Execute transactions error {:?}", _0)]
    ExecuteErr(String),
    /// I/O error raised by the write-ahead log.
    WALErr(std::io::Error),
    /// Other error used for very few errors.
    #[display(fmt = "{:?}", _0)]
    Other(String),
}
impl Error for ConsensusError {}
/// Allow `?` to convert a `ConsensusError` into the crate-wide `ProtocolError`.
impl From<ConsensusError> for ProtocolError {
    fn from(err: ConsensusError) -> ProtocolError {
        ProtocolError::new(ProtocolErrorKind::Consensus, Box::new(err))
    }
}
| 25.598485 | 85 | 0.656703 |
e5f3b25c691ab87fb7d242787450eb9d958bc21e | 988 | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
// This file is auto-generated by gen_target.sh based on msg_target_template.txt
// To modify it, modify msg_target_template.txt and run gen_target.sh instead.
use lightning::ln::msgs;
use msg_targets::utils::VecWriter;
use utils::test_logger;
/// Fuzz entry point: feeds `data` through the `test_msg_simple!` harness for
/// `ChannelReady` messages (see the macro for the exact round-trip checks).
#[inline]
pub fn msg_channel_ready_test<Out: test_logger::Output>(data: &[u8], _out: Out) {
	test_msg_simple!(msgs::ChannelReady, data);
}
/// C-ABI wrapper around the fuzz target, used by external fuzz drivers.
#[no_mangle]
pub extern "C" fn msg_channel_ready_run(data: *const u8, datalen: usize) {
	// SAFETY: the caller must guarantee that `data` points to at least
	// `datalen` readable bytes that stay valid for the duration of this call.
	let data = unsafe { std::slice::from_raw_parts(data, datalen) };
	test_msg_simple!(msgs::ChannelReady, data);
}
| 35.285714 | 81 | 0.745951 |
5d21b2f9011b6114de1cd8d6e2cc7d13a4341753 | 4,169 | // Copyright 2020 Netwarps Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use libp2prs_core::PeerId;
use crate::connection::ConnectionId;
use crate::identify::IdentifyInfo;
use crate::network::NetworkInfo;
use crate::substream::{StreamId, Substream};
use crate::{ProtocolId, SwarmError};
/// Convenience alias: control operations report `SwarmError` on failure.
type Result<T> = std::result::Result<T, SwarmError>;
/// The control commands for [`Swarm`].
///
/// The `Swarm` controller manipulates the [`Swarm`] via these commands.
///
#[derive(Debug)]
#[allow(dead_code)]
pub enum SwarmControlCmd {
    /// Open a connection to the remote peer.
    NewConnection(PeerId, oneshot::Sender<Result<()>>),
    /// Close any connection to the remote peer.
    CloseConnection(PeerId, oneshot::Sender<Result<()>>),
    /// Open a new stream specified with protocol Ids to the remote peer.
    NewStream(PeerId, Vec<ProtocolId>, oneshot::Sender<Result<Substream>>),
    /// Close a stream specified.
    CloseStream(ConnectionId, StreamId),
    /// Close the whole connection.
    CloseSwarm,
    /// Retrieve network information of Swarm.
    NetworkInfo(oneshot::Sender<Result<NetworkInfo>>),
    /// Retrieve identify information of Swarm.
    IdentifyInfo(oneshot::Sender<Result<IdentifyInfo>>),
}
/// The `Swarm` controller.
///
/// While the `Swarm` task makes progress in the background, this controller
/// can be used to concurrently direct it, e.g. to open a new stream to a
/// remote peer or to close the swarm.
///
//#[derive(Debug)]
pub struct Control {
    /// Command channel towards the `Swarm` task.
    sender: mpsc::Sender<SwarmControlCmd>,
}
impl Clone for Control {
fn clone(&self) -> Self {
Control {
sender: self.sender.clone(),
}
}
}
impl Control {
    pub(crate) fn new(sender: mpsc::Sender<SwarmControlCmd>) -> Self {
        Control { sender }
    }
    /// Make a connection to the remote peer and await the outcome reported
    /// back by `Swarm`.
    pub async fn new_connection(&mut self, peer_id: PeerId) -> Result<()> {
        let (tx, rx) = oneshot::channel();
        // `peer_id` is owned and unused afterwards, so move it into the
        // command instead of cloning it.
        self.sender.send(SwarmControlCmd::NewConnection(peer_id, tx)).await?;
        rx.await?
    }
    /// Open a new outbound stream towards the remote, negotiating one of the
    /// given protocol ids.
    pub async fn new_stream(&mut self, peer_id: PeerId, pids: Vec<ProtocolId>) -> Result<Substream> {
        let (tx, rx) = oneshot::channel();
        self.sender.send(SwarmControlCmd::NewStream(peer_id, pids, tx)).await?;
        rx.await?
    }
    /// Retrieve network statistics from Swarm.
    pub async fn retrieve_networkinfo(&mut self) -> Result<NetworkInfo> {
        let (tx, rx) = oneshot::channel();
        self.sender.send(SwarmControlCmd::NetworkInfo(tx)).await?;
        rx.await?
    }
    /// Close the connection.
    pub async fn close(&mut self) -> Result<()> {
        // SwarmControlCmd::CloseSwarm doesn't need a response from Swarm
        if self.sender.send(SwarmControlCmd::CloseSwarm).await.is_err() {
            // The receiver is closed which means the connection is already closed.
            return Ok(());
        }
        Ok(())
    }
}
| 36.893805 | 102 | 0.683617 |
294b7f9a728b84feff81c551ac8e186ddda998d0 | 33,419 | // Copyright 2019-2021 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::server::helpers::{send_call_error, send_error, send_response};
use crate::server::resource_limiting::{ResourceGuard, ResourceTable, ResourceVec, Resources};
use beef::Cow;
use futures_channel::{mpsc, oneshot};
use futures_util::{future::BoxFuture, FutureExt, StreamExt};
use jsonrpsee_types::to_json_raw_value;
use jsonrpsee_types::v2::error::{invalid_subscription_err, CALL_EXECUTION_FAILED_CODE};
use jsonrpsee_types::{
error::{Error, SubscriptionClosedError},
traits::ToRpcParams,
v2::{
ErrorCode, Id, Params, Request, Response, SubscriptionId as RpcSubscriptionId, SubscriptionPayload,
SubscriptionResponse, TwoPointZero,
},
DeserializeOwned,
};
use parking_lot::Mutex;
use rustc_hash::FxHashMap;
use serde::Serialize;
use serde_json::value::RawValue;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::future::Future;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
/// A `MethodCallback` is an RPC endpoint, callable with a standard JSON-RPC request,
/// implemented as a function pointer to a `Fn` function taking four arguments:
/// the `id`, `params`, a channel the function uses to communicate the result (or error)
/// back to `jsonrpsee`, and the connection ID (useful for the websocket transport).
pub type SyncMethod = Arc<dyn Send + Sync + Fn(Id, Params, &MethodSink, ConnectionId, MaxResponseSize)>;
/// Similar to [`SyncMethod`], but represents an asynchronous handler and takes an additional argument containing a [`ResourceGuard`] if configured.
pub type AsyncMethod<'a> = Arc<
	dyn Send + Sync + Fn(Id<'a>, Params<'a>, MethodSink, Option<ResourceGuard>, MaxResponseSize) -> BoxFuture<'a, ()>,
>;
/// Connection ID, used for stateful protocol such as WebSockets.
/// For stateless protocols such as http it's unused, so feel free to set it some hardcoded value.
pub type ConnectionId = usize;
/// Subscription ID.
pub type SubscriptionId = u64;
/// Sink that is used to send back the result to the server for a specific method.
pub type MethodSink = mpsc::UnboundedSender<String>;
/// Max response size in bytes for an executed call.
pub type MaxResponseSize = u32;
/// Shared map of active subscription sinks, keyed per connection and
/// subscription ID. NOTE(review): the paired `oneshot::Receiver` appears to
/// signal unsubscription/cancellation — confirm at the subscription call sites.
type Subscribers = Arc<Mutex<FxHashMap<SubscriptionKey, (MethodSink, oneshot::Receiver<()>)>>>;
/// Represent a unique subscription entry based on [`SubscriptionId`] and [`ConnectionId`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct SubscriptionKey {
	/// The connection the subscription was established on.
	conn_id: ConnectionId,
	/// The subscription's ID.
	sub_id: SubscriptionId,
}
/// Callback wrapper that can be either sync or async; stored inside
/// [`MethodCallback`] alongside the method's resource table.
#[derive(Clone)]
enum MethodKind {
	/// Synchronous method handler.
	Sync(SyncMethod),
	/// Asynchronous method handler.
	Async(AsyncMethod<'static>),
}
/// Information about resources the method uses during its execution. Initialized when the server starts.
#[derive(Clone, Debug)]
enum MethodResources {
	/// Uninitialized resource table, mapping string label to units.
	Uninitialized(Box<[(&'static str, u16)]>),
	/// Initialized resource table containing units for each `ResourceId`.
	Initialized(ResourceTable),
}
/// Method callback wrapper that contains a sync or async closure,
/// plus a table with resources it needs to claim to run
#[derive(Clone, Debug)]
pub struct MethodCallback {
	/// The handler itself (sync or async).
	callback: MethodKind,
	/// Resource units this method claims; label-based until initialized.
	resources: MethodResources,
}
/// Builder for configuring resources used by a method.
///
/// The accumulated `(label, units)` table is written back into the target
/// callback when the builder is dropped (see the `Drop` impl in this module).
#[derive(Debug)]
pub struct MethodResourcesBuilder<'a> {
	/// Pending `(label, units)` entries collected so far.
	build: ResourceVec<(&'static str, u16)>,
	/// The callback whose resource table is being configured.
	callback: &'a mut MethodCallback,
}
impl<'a> MethodResourcesBuilder<'a> {
/// Define how many units of a given named resource the method uses during its execution.
pub fn resource(mut self, label: &'static str, units: u16) -> Result<Self, Error> {
self.build.try_push((label, units)).map_err(|_| Error::MaxResourcesReached)?;
Ok(self)
}
}
impl<'a> Drop for MethodResourcesBuilder<'a> {
	fn drop(&mut self) {
		// Write the accumulated (label, units) table back into the callback
		// when the builder goes out of scope. It stays `Uninitialized` until
		// the server resolves labels to resource IDs in `initialize_resources`.
		self.callback.resources = MethodResources::Uninitialized(self.build[..].into());
	}
}
impl MethodCallback {
	/// Wrap a synchronous handler; resources start out uninitialized/empty.
	fn new_sync(callback: SyncMethod) -> Self {
		MethodCallback { callback: MethodKind::Sync(callback), resources: MethodResources::Uninitialized([].into()) }
	}
	/// Wrap an asynchronous handler; resources start out uninitialized/empty.
	fn new_async(callback: AsyncMethod<'static>) -> Self {
		MethodCallback { callback: MethodKind::Async(callback), resources: MethodResources::Uninitialized([].into()) }
	}
	/// Attempt to claim resources prior to executing a method. On success returns a guard that releases
	/// claimed resources when dropped.
	///
	/// Errors with `UninitializedMethod` if `initialize_resources` was never run
	/// for this method.
	pub fn claim(&self, name: &str, resources: &Resources) -> Result<ResourceGuard, Error> {
		match self.resources {
			MethodResources::Uninitialized(_) => Err(Error::UninitializedMethod(name.into())),
			MethodResources::Initialized(units) => resources.claim(units),
		}
	}
	/// Execute the callback, sending the resulting JSON (success or error) to the specified sink.
	///
	/// Sync callbacks run inline and return `None`; async callbacks return
	/// `Some(future)` which the caller is responsible for driving.
	pub fn execute(
		&self,
		tx: &MethodSink,
		req: Request<'_>,
		conn_id: ConnectionId,
		claimed: Option<ResourceGuard>,
		max_response_size: MaxResponseSize,
	) -> Option<BoxFuture<'static, ()>> {
		let id = req.id.clone();
		let params = Params::new(req.params.map(|params| params.get()));
		match &self.callback {
			MethodKind::Sync(callback) => {
				tracing::trace!(
					"[MethodCallback::execute] Executing sync callback, params={:?}, req.id={:?}, conn_id={:?}",
					params,
					id,
					conn_id
				);
				(callback)(id, params, tx, conn_id, max_response_size);
				// Release claimed resources
				drop(claimed);
				None
			}
			MethodKind::Async(callback) => {
				let tx = tx.clone();
				// Owned copies are required because the returned future is
				// `'static` and may outlive the borrowed request.
				let params = params.into_owned();
				let id = id.into_owned();
				tracing::trace!(
					"[MethodCallback::execute] Executing async callback, params={:?}, req.id={:?}, conn_id={:?}",
					params,
					id,
					conn_id
				);
				Some((callback)(id, params, tx, claimed, max_response_size))
			}
		}
	}
}
impl Debug for MethodKind {
	/// The wrapped closures are opaque, so only print which flavor this is.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		let kind = match self {
			Self::Async(_) => "Async",
			Self::Sync(_) => "Sync",
		};
		f.write_str(kind)
	}
}
/// Reference-counted, clone-on-write collection of synchronous and asynchronous methods.
#[derive(Default, Debug, Clone)]
pub struct Methods {
	// Cheap to clone; mutation goes through `Arc::make_mut` (copy-on-write),
	// see `mut_callbacks`.
	callbacks: Arc<FxHashMap<&'static str, MethodCallback>>,
}
impl Methods {
/// Creates a new empty [`Methods`].
pub fn new() -> Self {
Self::default()
}
fn verify_method_name(&mut self, name: &'static str) -> Result<(), Error> {
if self.callbacks.contains_key(name) {
return Err(Error::MethodAlreadyRegistered(name.into()));
}
Ok(())
}
/// Inserts the method callback for a given name, or returns an error if the name was already taken.
/// On success it returns a mut reference to the [`MethodCallback`] just inserted.
fn verify_and_insert(
&mut self,
name: &'static str,
callback: MethodCallback,
) -> Result<&mut MethodCallback, Error> {
match self.mut_callbacks().entry(name) {
Entry::Occupied(_) => Err(Error::MethodAlreadyRegistered(name.into())),
Entry::Vacant(vacant) => Ok(vacant.insert(callback)),
}
}
/// Initialize resources for all methods in this collection. This method has no effect if called more than once.
pub fn initialize_resources(mut self, resources: &Resources) -> Result<Self, Error> {
let callbacks = self.mut_callbacks();
for (&method_name, callback) in callbacks.iter_mut() {
if let MethodResources::Uninitialized(uninit) = &callback.resources {
let mut map = resources.defaults;
for &(label, units) in uninit.iter() {
let idx = match resources.labels.iter().position(|&l| l == label) {
Some(idx) => idx,
None => return Err(Error::ResourceNameNotFoundForMethod(label, method_name)),
};
// If resource capacity set to `0`, we ignore the unit value of the method
// and set it to `0` as well, effectively making the resource unlimited.
if resources.capacities[idx] == 0 {
map[idx] = 0;
} else {
map[idx] = units;
}
}
callback.resources = MethodResources::Initialized(map);
}
}
Ok(self)
}
/// Helper for obtaining a mut ref to the callbacks HashMap.
fn mut_callbacks(&mut self) -> &mut FxHashMap<&'static str, MethodCallback> {
Arc::make_mut(&mut self.callbacks)
}
/// Merge two [`Methods`]'s by adding all [`MethodCallback`]s from `other` into `self`.
/// Fails if any of the methods in `other` is present already.
pub fn merge(&mut self, other: impl Into<Methods>) -> Result<(), Error> {
let mut other = other.into();
for name in other.callbacks.keys() {
self.verify_method_name(name)?;
}
let callbacks = self.mut_callbacks();
for (name, callback) in other.mut_callbacks().drain() {
callbacks.insert(name, callback);
}
Ok(())
}
/// Returns the method callback.
pub fn method(&self, method_name: &str) -> Option<&MethodCallback> {
self.callbacks.get(method_name)
}
/// Attempt to execute a callback, sending the resulting JSON (success or error) to the specified sink.
pub fn execute(
&self,
tx: &MethodSink,
req: Request,
conn_id: ConnectionId,
max_response_size: MaxResponseSize,
) -> Option<BoxFuture<'static, ()>> {
tracing::trace!("[Methods::execute] Executing request: {:?}", req);
match self.callbacks.get(&*req.method) {
Some(callback) => callback.execute(tx, req, conn_id, None, max_response_size),
None => {
send_error(req.id, tx, ErrorCode::MethodNotFound.into());
None
}
}
}
/// Attempt to execute a callback while checking that the call does not exhaust the available resources, sending the resulting JSON (success or error) to the specified sink.
pub fn execute_with_resources(
&self,
tx: &MethodSink,
req: Request,
conn_id: ConnectionId,
resources: &Resources,
max_response_size: MaxResponseSize,
) -> Option<BoxFuture<'static, ()>> {
tracing::trace!("[Methods::execute_with_resources] Executing request: {:?}", req);
match self.callbacks.get(&*req.method) {
Some(callback) => match callback.claim(&req.method, resources) {
Ok(guard) => callback.execute(tx, req, conn_id, Some(guard), max_response_size),
Err(err) => {
tracing::error!("[Methods::execute_with_resources] failed to lock resources: {:?}", err);
send_error(req.id, tx, ErrorCode::ServerIsBusy.into());
None
}
},
None => {
send_error(req.id, tx, ErrorCode::MethodNotFound.into());
None
}
}
}
/// Helper to call a method on the `RPC module` without having to spin up a server.
///
/// The params must be serializable as JSON array, see [`ToRpcParams`] for further documentation.
pub async fn call_with<Params: ToRpcParams>(&self, method: &str, params: Params) -> Option<String> {
let params = params.to_rpc_params().ok();
self.call(method, params).await
}
/// Helper alternative to `execute`, useful for writing unit tests without having to spin
/// a server up.
pub async fn call(&self, method: &str, params: Option<Box<RawValue>>) -> Option<String> {
let req = Request {
jsonrpc: TwoPointZero,
id: Id::Number(0),
method: Cow::borrowed(method),
params: params.as_deref(),
};
let (tx, mut rx) = mpsc::unbounded();
if let Some(fut) = self.execute(&tx, req, 0, MaxResponseSize::MAX) {
fut.await;
}
rx.next().await
}
/// Test helper that sets up a subscription using the given `method`. Returns a tuple of the
/// [`SubscriptionId`] and a channel on which subscription JSON payloads can be received.
pub async fn test_subscription(&self, method: &str, params: impl ToRpcParams) -> TestSubscription {
let params = params.to_rpc_params().expect("valid JSON-RPC params");
tracing::trace!("[Methods::test_subscription] Calling subscription method: {:?}, params: {:?}", method, params);
let req =
Request { jsonrpc: TwoPointZero, id: Id::Number(0), method: Cow::borrowed(method), params: Some(¶ms) };
let (tx, mut rx) = mpsc::unbounded();
if let Some(fut) = self.execute(&tx, req, 0, MaxResponseSize::MAX) {
fut.await;
}
let response = rx.next().await.expect("Could not establish subscription.");
let subscription_response = serde_json::from_str::<Response<SubscriptionId>>(&response)
.unwrap_or_else(|_| panic!("Could not deserialize subscription response {:?}", response));
let sub_id = subscription_response.result;
TestSubscription { tx, rx, sub_id }
}
/// Returns an `Iterator` with all the method names registered on this server.
pub fn method_names(&self) -> impl Iterator<Item = &'static str> + '_ {
self.callbacks.keys().copied()
}
}
// Lets an `RpcModule` be used anywhere a `&Methods` is expected.
impl<Context> Deref for RpcModule<Context> {
	type Target = Methods;
	fn deref(&self) -> &Methods {
		&self.methods
	}
}
// Mutable counterpart of the `Deref` impl above; exposes `&mut Methods`.
impl<Context> DerefMut for RpcModule<Context> {
	fn deref_mut(&mut self) -> &mut Methods {
		&mut self.methods
	}
}
/// Sets of JSON-RPC methods can be organized into a "module"s that are in turn registered on the server or,
/// alternatively, merged with other modules to construct a cohesive API. [`RpcModule`] wraps an additional context
/// argument that can be used to access data during call execution.
#[derive(Debug, Clone)]
pub struct RpcModule<Context> {
	// Shared context handed to every registered callback.
	ctx: Arc<Context>,
	// The callbacks registered on this module.
	methods: Methods,
}
impl<Context> RpcModule<Context> {
/// Create a new module with a given shared `Context`.
pub fn new(ctx: Context) -> Self {
Self { ctx: Arc::new(ctx), methods: Default::default() }
}
}
impl<Context> From<RpcModule<Context>> for Methods {
fn from(module: RpcModule<Context>) -> Methods {
module.methods
}
}
impl<Context: Send + Sync + 'static> RpcModule<Context> {
	/// Register a new synchronous RPC method, which computes the response with the given callback.
	pub fn register_method<R, F>(
		&mut self,
		method_name: &'static str,
		callback: F,
	) -> Result<MethodResourcesBuilder, Error>
	where
		Context: Send + Sync + 'static,
		R: Serialize,
		F: Fn(Params, &Context) -> Result<R, Error> + Send + Sync + 'static,
	{
		let ctx = self.ctx.clone();
		let callback = self.methods.verify_and_insert(
			method_name,
			MethodCallback::new_sync(Arc::new(move |id, params, tx, _, max_response_size| {
				match callback(params, &*ctx) {
					Ok(res) => send_response(id, tx, res, max_response_size),
					Err(err) => send_call_error(id, tx, err),
				};
			})),
		)?;
		Ok(MethodResourcesBuilder { build: ResourceVec::new(), callback })
	}
	/// Register a new asynchronous RPC method, which computes the response with the given callback.
	pub fn register_async_method<R, Fun, Fut>(
		&mut self,
		method_name: &'static str,
		callback: Fun,
	) -> Result<MethodResourcesBuilder, Error>
	where
		R: Serialize + Send + Sync + 'static,
		Fut: Future<Output = Result<R, Error>> + Send,
		Fun: (Fn(Params<'static>, Arc<Context>) -> Fut) + Copy + Send + Sync + 'static,
	{
		let ctx = self.ctx.clone();
		let callback = self.methods.verify_and_insert(
			method_name,
			MethodCallback::new_async(Arc::new(move |id, params, tx, claimed, max_response_size| {
				let ctx = ctx.clone();
				let future = async move {
					match callback(params, ctx).await {
						Ok(res) => send_response(id, &tx, res, max_response_size),
						Err(err) => send_call_error(id, &tx, err),
					};
					// Release claimed resources
					drop(claimed);
				};
				future.boxed()
			})),
		)?;
		Ok(MethodResourcesBuilder { build: ResourceVec::new(), callback })
	}
	/// Register a new **blocking** synchronous RPC method, which computes the response with the given callback.
	/// Unlike the regular [`register_method`](RpcModule::register_method), this method can block its thread and perform expensive computations.
	pub fn register_blocking_method<R, F>(
		&mut self,
		method_name: &'static str,
		callback: F,
	) -> Result<MethodResourcesBuilder, Error>
	where
		Context: Send + Sync + 'static,
		R: Serialize,
		F: Fn(Params, Arc<Context>) -> Result<R, Error> + Copy + Send + Sync + 'static,
	{
		let ctx = self.ctx.clone();
		let callback = self.methods.verify_and_insert(
			method_name,
			MethodCallback::new_async(Arc::new(move |id, params, tx, claimed, max_response_size| {
				let ctx = ctx.clone();
				tokio::task::spawn_blocking(move || {
					match callback(params, ctx) {
						Ok(res) => send_response(id, &tx, res, max_response_size),
						Err(err) => send_call_error(id, &tx, err),
					};
					// Release claimed resources
					drop(claimed);
				})
				.map(|join_result| {
					// FIX: the closure receives `Result<(), JoinError>`, so only log
					// on an actual join failure (panic/cancellation). The previous
					// code logged "Join error" unconditionally, including on success.
					if let Err(err) = join_result {
						tracing::error!("Join error for blocking RPC method: {:?}", err);
					}
				})
				.boxed()
			})),
		)?;
		Ok(MethodResourcesBuilder { build: ResourceVec::new(), callback })
	}
	/// Register a new RPC subscription that invokes a callback on every subscription call.
	///
	/// This method ensures that the `subscription_method_name` and `unsubscription_method_name` are unique.
	/// The `notif_method_name` argument sets the content of the `method` field in the JSON document that
	/// the server sends back to the client. The uniqueness of this value is not machine checked and it's up to
	/// the user to ensure it is not used in any other [`RpcModule`] used in the server.
	///
	/// # Arguments
	///
	/// * `subscription_method_name` - name of the method to call to initiate a subscription
	/// * `notif_method_name` - name of method to be used in the subscription payload (technically a JSON-RPC notification)
	/// * `unsubscription_method` - name of the method to call to terminate a subscription
	/// * `callback` - A callback to invoke on each subscription; it takes three parameters:
	///     - [`Params`]: JSON-RPC parameters in the subscription call.
	///     - [`SubscriptionSink`]: A sink to send messages to the subscriber.
	///     - Context: Any type that can be embedded into the [`RpcModule`].
	///
	/// # Examples
	///
	/// ```no_run
	///
	/// use jsonrpsee_utils::server::rpc_module::RpcModule;
	///
	/// let mut ctx = RpcModule::new(99_usize);
	/// ctx.register_subscription("sub", "notif_name", "unsub", |params, mut sink, ctx| {
	///     let x: usize = params.one()?;
	///     std::thread::spawn(move || {
	///         let sum = x + (*ctx);
	///         sink.send(&sum)
	///     });
	///     Ok(())
	/// });
	/// ```
	pub fn register_subscription<F>(
		&mut self,
		subscribe_method_name: &'static str,
		notif_method_name: &'static str,
		unsubscribe_method_name: &'static str,
		callback: F,
	) -> Result<(), Error>
	where
		Context: Send + Sync + 'static,
		F: Fn(Params, SubscriptionSink, Arc<Context>) -> Result<(), Error> + Send + Sync + 'static,
	{
		if subscribe_method_name == unsubscribe_method_name {
			return Err(Error::SubscriptionNameConflict(subscribe_method_name.into()));
		}
		self.methods.verify_method_name(subscribe_method_name)?;
		self.methods.verify_method_name(unsubscribe_method_name)?;
		let ctx = self.ctx.clone();
		let subscribers = Subscribers::default();
		{
			let subscribers = subscribers.clone();
			self.methods.mut_callbacks().insert(
				subscribe_method_name,
				MethodCallback::new_sync(Arc::new(move |id, params, method_sink, conn_id, max_response_size| {
					// One-shot channel used to detect unsubscription / disconnect.
					let (conn_tx, conn_rx) = oneshot::channel::<()>();
					let sub_id = {
						// Keep the random id within JavaScript's safe-integer range.
						const JS_NUM_MASK: SubscriptionId = !0 >> 11;
						let sub_id = rand::random::<SubscriptionId>() & JS_NUM_MASK;
						let uniq_sub = SubscriptionKey { conn_id, sub_id };
						subscribers.lock().insert(uniq_sub, (method_sink.clone(), conn_rx));
						sub_id
					};
					// Answer the subscribe call with the new subscription id.
					send_response(id.clone(), method_sink, sub_id, max_response_size);
					let sink = SubscriptionSink {
						inner: method_sink.clone(),
						method: notif_method_name,
						subscribers: subscribers.clone(),
						uniq_sub: SubscriptionKey { conn_id, sub_id },
						is_connected: Some(conn_tx),
					};
					if let Err(err) = callback(params, sink, ctx.clone()) {
						tracing::error!(
							"subscribe call '{}' failed: {:?}, request id={:?}",
							subscribe_method_name,
							err,
							id
						);
						send_error(id, method_sink, ErrorCode::ServerError(CALL_EXECUTION_FAILED_CODE).into());
					}
				})),
			);
		}
		{
			self.methods.mut_callbacks().insert(
				unsubscribe_method_name,
				MethodCallback::new_sync(Arc::new(move |id, params, tx, conn_id, max_response_size| {
					let sub_id = match params.one() {
						Ok(sub_id) => sub_id,
						Err(_) => {
							tracing::error!(
								"unsubscribe call '{}' failed: couldn't parse subscription id, request id={:?}",
								unsubscribe_method_name,
								id
							);
							let err = to_json_raw_value(&"Invalid subscription ID type, must be integer").ok();
							send_error(id, tx, invalid_subscription_err(err.as_deref()));
							return;
						}
					};
					if subscribers.lock().remove(&SubscriptionKey { conn_id, sub_id }).is_some() {
						send_response(id, tx, "Unsubscribed", max_response_size);
					} else {
						let err = to_json_raw_value(&format!("Invalid subscription ID={}", sub_id)).ok();
						send_error(id, tx, invalid_subscription_err(err.as_deref()))
					}
				})),
			);
		}
		Ok(())
	}
	/// Register an alias for an existing_method. Alias uniqueness is enforced.
	pub fn register_alias(&mut self, alias: &'static str, existing_method: &'static str) -> Result<(), Error> {
		self.methods.verify_method_name(alias)?;
		let callback = match self.methods.callbacks.get(existing_method) {
			Some(callback) => callback.clone(),
			None => return Err(Error::MethodNotFound(existing_method.into())),
		};
		self.methods.mut_callbacks().insert(alias, callback);
		Ok(())
	}
}
/// Represents a single subscription.
#[derive(Debug)]
pub struct SubscriptionSink {
	/// Sink.
	inner: mpsc::UnboundedSender<String>,
	/// MethodCallback.
	method: &'static str,
	/// Unique subscription.
	uniq_sub: SubscriptionKey,
	/// Shared Mutex of subscriptions for this method.
	subscribers: Subscribers,
	/// A type to track whether the subscription is active (the subscriber is connected).
	///
	/// None - implies that the subscription has been closed.
	is_connected: Option<oneshot::Sender<()>>,
}
impl SubscriptionSink {
	/// Send a message back to subscribers.
	pub fn send<T: Serialize>(&mut self, result: &T) -> Result<(), Error> {
		let msg = self.build_message(result)?;
		self.inner_send(msg).map_err(Into::into)
	}
	/// Serializes `result` into a JSON-RPC subscription notification addressed
	/// to this sink's subscription id.
	fn build_message<T: Serialize>(&self, result: &T) -> Result<String, Error> {
		serde_json::to_string(&SubscriptionResponse {
			jsonrpc: TwoPointZero,
			method: self.method,
			params: SubscriptionPayload { subscription: RpcSubscriptionId::Num(self.uniq_sub.sub_id), result },
		})
		.map_err(Into::into)
	}
	/// Low-level send; on failure the subscription is closed and the error
	/// reason is reported back through `Error::SubscriptionClosed`.
	fn inner_send(&mut self, msg: String) -> Result<(), Error> {
		let res = match self.is_connected.as_ref() {
			Some(conn) if !conn.is_canceled() => {
				// unbounded send only fails if the receiver has been dropped.
				self.inner.unbounded_send(msg).map_err(|_| {
					Some(SubscriptionClosedError::new("Closed by the client (connection reset)", self.uniq_sub.sub_id))
				})
			}
			// The oneshot receiver was dropped, i.e. the client unsubscribed.
			Some(_) => Err(Some(SubscriptionClosedError::new("Closed by unsubscribe call", self.uniq_sub.sub_id))),
			// NOTE(niklasad1): this should be unreachable, after the first error is detected the subscription is closed.
			None => Err(None),
		};
		if let Err(Some(e)) = &res {
			self.inner_close(e);
		}
		res.map_err(|e| {
			let err = e.unwrap_or_else(|| SubscriptionClosedError::new("Close reason unknown", self.uniq_sub.sub_id));
			Error::SubscriptionClosed(err)
		})
	}
	/// Close the subscription sink with a customized error message.
	pub fn close(&mut self, msg: &str) {
		let err = SubscriptionClosedError::new(msg, self.uniq_sub.sub_id);
		self.inner_close(&err);
	}
	/// Marks the sink as closed, removes it from the shared subscriber map and
	/// sends a final "closed" notification to the subscriber (best effort).
	fn inner_close(&mut self, err: &SubscriptionClosedError) {
		self.is_connected.take();
		if let Some((sink, _)) = self.subscribers.lock().remove(&self.uniq_sub) {
			tracing::debug!("Closing subscription: {:?}", self.uniq_sub.sub_id);
			let msg = self.build_message(err).expect("valid json infallible; qed");
			let _ = sink.unbounded_send(msg);
		}
	}
}
impl Drop for SubscriptionSink {
	fn drop(&mut self) {
		// Notify the subscriber and deregister when the sink goes away server-side.
		let err = SubscriptionClosedError::new("Closed by the server", self.uniq_sub.sub_id);
		self.inner_close(&err);
	}
}
/// Wrapper struct that maintains a subscription for testing.
#[derive(Debug)]
pub struct TestSubscription {
	// Sender half; kept alive so the server side sees a connected client.
	tx: mpsc::UnboundedSender<String>,
	// Receiver half yielding raw subscription JSON payloads.
	rx: mpsc::UnboundedReceiver<String>,
	// Id returned by the subscribe call.
	sub_id: u64,
}
impl TestSubscription {
	/// Close the subscription channel.
	pub fn close(&mut self) {
		self.tx.close_channel();
	}
	/// Get the subscription ID
	pub fn subscription_id(&self) -> u64 {
		self.sub_id
	}
	/// Returns `Some((val, sub_id))` for the next element of type T from the underlying stream,
	/// otherwise `None` if the subscription was closed.
	///
	/// # Panics
	///
	/// If the decoding the value as `T` fails.
	pub async fn next<T: DeserializeOwned>(&mut self) -> Option<(T, jsonrpsee_types::v2::SubscriptionId)> {
		let raw = self.rx.next().await?;
		let val: SubscriptionResponse<T> =
			serde_json::from_str(&raw).expect("valid response in TestSubscription::next()");
		Some((val.params.result, val.params.subscription))
	}
}
impl Drop for TestSubscription {
	fn drop(&mut self) {
		// Close the channel so the server-side sink stops sending.
		self.close();
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use jsonrpsee_types::v2;
	use serde::Deserialize;
	use std::collections::HashMap;
	// Modules with different `Context` types can still be merged, because
	// `merge` only deals with the type-erased `Methods`.
	#[test]
	fn rpc_modules_with_different_contexts_can_be_merged() {
		let cx = Vec::<u8>::new();
		let mut mod1 = RpcModule::new(cx);
		mod1.register_method("bla with Vec context", |_: Params, _| Ok(())).unwrap();
		let mut mod2 = RpcModule::new(String::new());
		mod2.register_method("bla with String context", |_: Params, _| Ok(())).unwrap();
		mod1.merge(mod2).unwrap();
		assert!(mod1.method("bla with Vec context").is_some());
		assert!(mod1.method("bla with String context").is_some());
	}
	#[test]
	fn rpc_context_modules_can_register_subscriptions() {
		let cx = ();
		let mut cxmodule = RpcModule::new(cx);
		let _subscription = cxmodule.register_subscription("hi", "hi", "goodbye", |_, _, _| Ok(()));
		// Both the subscribe and unsubscribe methods must be registered.
		assert!(cxmodule.method("hi").is_some());
		assert!(cxmodule.method("goodbye").is_some());
	}
	#[test]
	fn rpc_register_alias() {
		let mut module = RpcModule::new(());
		module.register_method("hello_world", |_: Params, _| Ok(())).unwrap();
		module.register_alias("hello_foobar", "hello_world").unwrap();
		assert!(module.method("hello_world").is_some());
		assert!(module.method("hello_foobar").is_some());
	}
	// Exercises `Methods::call`/`call_with` directly, without a running server.
	#[tokio::test]
	async fn calling_method_without_server() {
		// Call sync method with no params
		let mut module = RpcModule::new(());
		module.register_method("boo", |_: Params, _| Ok(String::from("boo!"))).unwrap();
		let result = module.call("boo", None).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":"boo!","id":0}"#);
		// Call sync method with params
		module
			.register_method("foo", |params, _| {
				let n: u16 = params.one()?;
				Ok(n * 2)
			})
			.unwrap();
		let result = module.call_with("foo", [3]).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":6,"id":0}"#);
		// Call sync method with bad param
		let result = module.call_with("foo", (false,)).await.unwrap();
		assert_eq!(
			result,
			r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid type: boolean `false`, expected u16 at line 1 column 6"},"id":0}"#
		);
		// Call async method with params and context
		struct MyContext;
		impl MyContext {
			fn roo(&self, things: Vec<u8>) -> u16 {
				things.iter().sum::<u8>().into()
			}
		}
		let mut module = RpcModule::new(MyContext);
		module
			.register_async_method("roo", |params, ctx| {
				let ns: Vec<u8> = params.parse().expect("valid params please");
				async move { Ok(ctx.roo(ns)) }
			})
			.unwrap();
		let result = module.call_with("roo", vec![12, 13]).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":25,"id":0}"#);
	}
	#[tokio::test]
	async fn calling_method_without_server_using_proc_macro() {
		use jsonrpsee::{proc_macros::rpc, types::async_trait};
		// Setup
		#[derive(Debug, Deserialize, Serialize)]
		#[allow(unreachable_pub)]
		pub struct Gun {
			shoots: bool,
		}
		#[derive(Debug, Deserialize, Serialize)]
		#[allow(unreachable_pub)]
		pub struct Beverage {
			ice: bool,
		}
		#[rpc(server)]
		pub trait Cool {
			/// Sync method, no params.
			#[method(name = "rebel_without_cause")]
			fn rebel_without_cause(&self) -> Result<bool, Error>;
			/// Sync method.
			#[method(name = "rebel")]
			fn rebel(&self, gun: Gun, map: HashMap<u8, u8>) -> Result<String, Error>;
			/// Async method.
			#[method(name = "revolution")]
			async fn can_have_any_name(&self, beverage: Beverage, some_bytes: Vec<u8>) -> Result<String, Error>;
		}
		struct CoolServerImpl;
		#[async_trait]
		impl CoolServer for CoolServerImpl {
			fn rebel_without_cause(&self) -> Result<bool, Error> {
				Ok(false)
			}
			fn rebel(&self, gun: Gun, map: HashMap<u8, u8>) -> Result<String, Error> {
				Ok(format!("{} {:?}", map.values().len(), gun))
			}
			async fn can_have_any_name(&self, beverage: Beverage, some_bytes: Vec<u8>) -> Result<String, Error> {
				Ok(format!("drink: {:?}, phases: {:?}", beverage, some_bytes))
			}
		}
		let module = CoolServerImpl.into_rpc();
		// Call sync method with no params
		let result = module.call("rebel_without_cause", None).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":false,"id":0}"#);
		// Call sync method with no params, alternative way.
		let result = module.call_with::<[u8; 0]>("rebel_without_cause", []).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":false,"id":0}"#);
		// Call sync method with params
		let result = module.call_with("rebel", (Gun { shoots: true }, HashMap::<u8, u8>::default())).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":"0 Gun { shoots: true }","id":0}"#);
		// Call sync method with bad params
		let result = module.call_with("rebel", (Gun { shoots: true }, false)).await.unwrap();
		assert_eq!(
			result,
			r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid type: boolean `false`, expected a map at line 1 column 5"},"id":0}"#
		);
		// Call async method with params and context
		let result = module.call_with("revolution", (Beverage { ice: true }, vec![1, 2, 3])).await.unwrap();
		assert_eq!(result, r#"{"jsonrpc":"2.0","result":"drink: Beverage { ice: true }, phases: [1, 2, 3]","id":0}"#);
	}
	#[tokio::test]
	async fn subscribing_without_server() {
		let mut module = RpcModule::new(());
		module
			.register_subscription("my_sub", "my_sub", "my_unsub", |_, mut sink, _| {
				let mut stream_data = vec!['0', '1', '2'];
				std::thread::spawn(move || loop {
					tracing::debug!("This is your friendly subscription sending data.");
					if let Some(letter) = stream_data.pop() {
						if let Err(Error::SubscriptionClosed(_)) = sink.send(&letter) {
							return;
						}
					} else {
						return;
					}
					std::thread::sleep(std::time::Duration::from_millis(500));
				});
				Ok(())
			})
			.unwrap();
		let mut my_sub: TestSubscription = module.test_subscription("my_sub", Vec::<()>::new()).await;
		// `pop` yields '2', '1', '0' in that order.
		for i in (0..=2).rev() {
			let (val, id) = my_sub.next::<char>().await.unwrap();
			assert_eq!(val, std::char::from_digit(i, 10).unwrap());
			assert_eq!(id, v2::params::SubscriptionId::Num(my_sub.subscription_id()));
		}
		// The subscription is now closed by the server.
		let (sub_closed_err, _) = my_sub.next::<SubscriptionClosedError>().await.unwrap();
		assert_eq!(sub_closed_err.subscription_id(), my_sub.subscription_id());
		assert_eq!(sub_closed_err.close_reason(), "Closed by the server");
	}
	#[tokio::test]
	async fn close_test_subscribing_without_server() {
		let mut module = RpcModule::new(());
		module
			.register_subscription("my_sub", "my_sub", "my_unsub", |_, mut sink, _| {
				std::thread::spawn(move || loop {
					if let Err(Error::SubscriptionClosed(_)) = sink.send(&"lo") {
						return;
					}
					std::thread::sleep(std::time::Duration::from_millis(500));
				});
				Ok(())
			})
			.unwrap();
		let mut my_sub: TestSubscription = module.test_subscription("my_sub", Vec::<()>::new()).await;
		let (val, id) = my_sub.next::<String>().await.unwrap();
		assert_eq!(&val, "lo");
		assert_eq!(id, v2::params::SubscriptionId::Num(my_sub.subscription_id()));
		// close the subscription to ensure it doesn't return any items.
		my_sub.close();
		assert_eq!(None, my_sub.next::<String>().await);
	}
}
| 33.654582 | 174 | 0.676501 |
1471030697c1c60d0469255581dbe62f51fa20d1 | 2,257 | fn print(count: &mut usize, id: usize, layout: &layout::tree::LayoutR) {
*count += 1;
debug_println!("result: {:?} {:?} {:?}", *count, id, layout);
}
// Verifies that a node without an explicit size grows to fit its child plus
// its own border (10px on each side around a 10x10 child => 30x30).
#[test]
fn size_defined_by_child_with_border() {
    let mut layout_tree = layout::tree::LayoutTree::default();
    // Node 1: absolutely positioned 1920x1024 root.
    layout_tree.insert(
        1,
        0,
        0,
        layout::idtree::InsertType::Back,
        layout::style::Style {
            position_type: layout::style::PositionType::Absolute,
            size: layout::geometry::Size {
                width: layout::style::Dimension::Points(1920.0),
                height: layout::style::Dimension::Points(1024.0),
            },
            ..Default::default()
        },
    );
    // Node 2: no explicit size, only a 10px border on all four sides.
    layout_tree.insert(
        2,
        1,
        0,
        layout::idtree::InsertType::Back,
        layout::style::Style {
            border: layout::geometry::Rect {
                start: layout::style::Dimension::Points(10f32),
                end: layout::style::Dimension::Points(10f32),
                top: layout::style::Dimension::Points(10f32),
                bottom: layout::style::Dimension::Points(10f32),
                ..Default::default()
            },
            ..Default::default()
        },
    );
    // Node 3: fixed 10x10 child that should dictate node 2's content size.
    layout_tree.insert(
        3,
        2,
        0,
        layout::idtree::InsertType::Back,
        layout::style::Style {
            size: layout::geometry::Size {
                width: layout::style::Dimension::Points(10f32),
                height: layout::style::Dimension::Points(10f32),
                ..Default::default()
            },
            ..Default::default()
        },
    );
    layout_tree.compute(print, &mut 0);
    // Border node: child size (10) + 2 * border (10) = 30 in each dimension.
    let layout = layout_tree.get_layout(2).unwrap();
    assert_eq!(layout.rect.end - layout.rect.start, 30f32);
    assert_eq!(layout.rect.bottom - layout.rect.top, 30f32);
    assert_eq!(layout.rect.start, 0f32);
    assert_eq!(layout.rect.top, 0f32);
    // Child sits inside the border, offset by 10 on each axis.
    let layout = layout_tree.get_layout(3).unwrap();
    assert_eq!(layout.rect.end - layout.rect.start, 10f32);
    assert_eq!(layout.rect.bottom - layout.rect.top, 10f32);
    assert_eq!(layout.rect.start, 10f32);
    assert_eq!(layout.rect.top, 10f32);
}
| 35.265625 | 73 | 0.534781 |
/// A recurrence period unit, serialized as the single letters `d`/`w`/`m`/`y`
/// (see the `FromStr`/`Display` impls below).
// FIX: stripped dataset-metadata residue fused onto the `#[derive]` line.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde-support", derive(Serialize, Deserialize))]
pub enum Period {
    Day,
    Week,
    Month,
    Year,
}
impl Period {
    /// Gregorian leap-year rule: divisible by 4, except centuries,
    /// except every 400th year.
    fn is_leap_year(year: i32) -> bool {
        (year % 4 == 0 && year % 100 != 0) || year % 400 == 0
    }

    /// Number of days in `month` (1-12) of `year`, accounting for leap years.
    pub fn days_in_month(month: u32, year: i32) -> u32 {
        match month {
            2 => {
                if Self::is_leap_year(year) {
                    29
                } else {
                    28
                }
            }
            1 | 3 | 5 | 7 | 8 | 10 | 12 => 31,
            _ => 30,
        }
    }
}
impl std::str::FromStr for Period {
type Err = ();
fn from_str(s: &str) -> Result<Self, ()> {
use self::Period::*;
match s {
"d" => Ok(Day),
"w" => Ok(Week),
"m" => Ok(Month),
"y" => Ok(Year),
_ => Err(()),
}
}
}
impl std::fmt::Display for Period {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use self::Period::*;
let s = match *self {
Day => "d",
Week => "w",
Month => "m",
Year => "y",
};
f.write_str(s)?;
Ok(())
}
}
impl std::ops::Add<::chrono::NaiveDate> for Period {
    type Output = chrono::NaiveDate;
    // Advances `rhs` by exactly one unit of this period by delegating to the
    // `Recurrence + NaiveDate` implementation with `num = 1`.
    // NOTE(review): `strict: true` is assumed to anchor to the given date —
    // confirm against `super::Recurrence`'s Add impl.
    fn add(self, rhs: Self::Output) -> Self::Output {
        let rec = super::Recurrence {
            num: 1,
            period: self,
            strict: true,
        };
        rec + rhs
    }
}
| 20.653333 | 70 | 0.422208 |
4814adfc389a98d394d5b23f640d18d103eb32c7 | 1,683 | #![feature(test)]
extern crate test;
use std::hint::black_box;
use test::Bencher;
use wasabi_leb128::{ReadLeb128, WriteLeb128};
const VALUES: [i64; 13] = [
-100000, -10000, -1000, -100, -10, -1, 0, 1, 10, 100, 1000, 10000, 100000,
];
const ITERATIONS: usize = 10_000;
// Use the gimli.rs leb128 crate as a baseline, since it has already been optimized a bit, it seems.
#[bench]
fn bench_read_gimli(bencher: &mut Bencher) {
    for &i in &VALUES {
        // Encode once outside the timed loop; only decoding is measured.
        let mut vec = Vec::new();
        gimli_leb128::write::signed(&mut vec, i).unwrap();
        bencher.iter(|| {
            for _ in 0..ITERATIONS {
                let result: i64 = gimli_leb128::read::signed(&mut vec.as_slice()).unwrap();
                // Prevent the optimizer from removing the decode.
                black_box(result);
            }
        })
    }
}
// Decoding benchmark for this crate's `ReadLeb128`, mirroring `bench_read_gimli`.
#[bench]
fn bench_read_ours(bencher: &mut Bencher) {
    for &i in &VALUES {
        // Encode once outside the timed loop; only decoding is measured.
        let mut vec = Vec::new();
        vec.write_leb128(i).unwrap();
        bencher.iter(|| {
            for _ in 0..ITERATIONS {
                // `.0` is the decoded value; the byte count is discarded.
                let result: i64 = vec.as_slice().read_leb128().unwrap().0;
                black_box(result);
            }
        })
    }
}
// Encoding benchmark for the gimli.rs baseline. NOTE(review): `vec` is never
// cleared, so it keeps growing across iterations — presumably intentional to
// measure appends, but worth confirming against the paired benchmark below.
#[bench]
fn bench_write_gimli(bencher: &mut Bencher) {
    for &i in &VALUES {
        let mut vec = Vec::new();
        bencher.iter(|| {
            for _ in 0..ITERATIONS {
                black_box(gimli_leb128::write::signed(&mut vec, i).unwrap());
            }
        })
    }
}
// Encoding benchmark for this crate's `WriteLeb128`, mirroring `bench_write_gimli`
// (same growing-buffer behavior, so the comparison stays apples-to-apples).
#[bench]
fn bench_write_ours(bencher: &mut Bencher) {
    for &i in &VALUES {
        let mut vec = Vec::new();
        bencher.iter(|| {
            for _ in 0..ITERATIONS {
                black_box(vec.write_leb128(i).unwrap());
            }
        })
    }
}
| 24.75 | 100 | 0.532383 |
64eddfc6a89b74c953ae7d33ebebc3fa777c3a19 | 15,965 | use std::rc::Rc;
use std::cell::RefCell;
use crate::sim1::state::State;
use crate::sim1::types::{AttackType, WeaponType, Vision, FieldEnergy, VecFieldEnergy, Particle, PHYSICAL_GAUGE, PYRO_GAUGE1A, PYRO_GAUGE2B, HYDRO_GAUGE1A, HYDRO_GAUGE2B, ELECTRO_GAUGE1A, ELECTRO_GAUGE2B, CRYO_GAUGE1A, CRYO_GAUGE2B, ANEMO_GAUGE1A, ANEMO_GAUGE2B, GEO_GAUGE1A, GEO_GAUGE2B, DENDRO_GAUGE1A, DENDRO_GAUGE2B};
use crate::sim1::fc::{FieldCharacterIndex, SpecialAbility, SkillAbility, CharacterAbility, NoopAbility, CharacterData, CharacterRecord, Enemy};
use crate::sim1::action::{Attack, AttackEvent, ICDTimer, ElementalAbsorption, NaLoop, SimpleSkill, SimpleSkillDot, SkillDamage2Dot, SimpleBurst, SimpleBurstDot, BurstDamage2Dot, NTimer, DurationTimer, StaminaTimer, ICDTimers};
use AttackType::*;
use WeaponType::*;
use Vision::*;
pub struct Yanfei {
    // NA/CA trait hooks are no-ops; this struct drives both itself via
    // `SpecialAbility` (see `maybe_attack`/`update` below).
    na_noop: NoopAbility,
    ca_noop: NoopAbility,
    // Scarlet Seal stack counter, gained/spent in `update`.
    scarlet_seal: usize,
    na: NaLoop,
    // Charged-attack variants ca_0..ca_4; which one fires depends on the
    // current seal count (see `additional_attack`).
    ca_0: Attack,
    ca_1: Attack,
    ca_2: Attack,
    ca_3: Attack,
    ca_4: Attack,
    // Extra hit fired alongside every charged attack (A4 passive).
    a4_blazing_eye: Attack,
    // Cooldown gate for charged attacks.
    ca_timer: NTimer,
    // TODO // stamina: StaminaTimer,
    skill: SimpleSkill,
    burst: SimpleBurst,
}
impl Yanfei {
    /// Static character data; numeric values are game-data constants.
    pub fn record() -> CharacterRecord {
        CharacterRecord::default()
            .name("Yanfei").vision(Pyro).weapon(Catalyst).release_date("2020-12-23").version(1.5)
            .base_hp(9352.0).base_atk(240.0).base_def(587.0)
            // a1
            .pyro_dmg(24.0 + 15.0)
            .energy_cost(80.0)
    }
    /// Builds the ability set for field slot `idx`, wiring every attack to the
    /// shared ICD timers. Multipliers are talent values from game data.
    pub fn new(idx: FieldCharacterIndex, icd_timer: &ICDTimers) -> Self {
        Self {
            na_noop: NoopAbility,
            ca_noop: NoopAbility,
            scarlet_seal: 0,
            na: NaLoop::new(
                // 3 attacks in 1.5 seconds
                &[0.5,0.5,0.5],
                vec![
                    Attack::na(105.01, 1, idx, &icd_timer),
                    Attack::na(93.83, 1, idx, &icd_timer),
                    Attack::na(136.82, 1, idx, &icd_timer),
                ]
            ),
            // Charged attacks at 0..4 Scarlet Seals consumed.
            ca_0: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 159.99,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            ca_1: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 188.22,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            ca_2: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 216.46,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            ca_3: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 244.69,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            ca_4: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 272.92,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            a4_blazing_eye: Attack {
                kind: AttackType::Ca,
                element: &PYRO_GAUGE1A,
                multiplier: 80.0,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.ca),
                idx,
            },
            ca_timer: NTimer::new(&[1.0]),
            // Skill: 9s cooldown, generates 3 Pyro particles.
            skill: SimpleSkill::new(&[9.0], Particle::new(Pyro, 3.0), Attack {
                kind: AttackType::PressSkill,
                element: &PYRO_GAUGE1A,
                multiplier: 305.28,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            }),
            // Burst: 15 one-second ticks of uptime followed by 5s until ready again.
            burst: SimpleBurst::new(&[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0, 5.0], Attack {
                kind: AttackType::Burst,
                element: &PYRO_GAUGE2B,
                multiplier: 328.32,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.burst),
                idx,
            }),
        }
    }
}
impl CharacterAbility for Yanfei {
    // NA/CA are handled by this struct's own `SpecialAbility` impl, so the
    // trait accessors expose no-op abilities for those two slots.
    fn na_ref(&self) -> &dyn SpecialAbility { &self.na_noop }
    fn ca_ref(&self) -> &dyn SpecialAbility { &self.ca_noop }
    fn skill_ref(&self) -> &dyn SkillAbility { &self.skill }
    fn burst_ref(&self) -> &dyn SpecialAbility { &self.burst }
    fn na_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.na_noop }
    fn ca_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.ca_noop }
    fn skill_mut(&mut self) -> &mut dyn SkillAbility { &mut self.skill }
    fn burst_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.burst }
}
impl SpecialAbility for Yanfei {
    // Prefer a charged attack once 3+ seals are held and the CA cooldown is idle;
    // otherwise fall back to the normal-attack loop.
    fn maybe_attack(&self, data: &CharacterData) -> Option<AttackEvent> {
        match (self.scarlet_seal >= 3, self.ca_timer.n) {
            (true, 0) => Some(AttackEvent {
                kind: self.ca_0.kind,
                idx: self.ca_0.idx,
            }),
            _ => self.na.maybe_attack(data),
        }
    }
    fn update(&mut self, time: f32, event: &AttackEvent, data: &CharacterData, attack: &[*const Attack], particles: &[FieldEnergy], enemy: &Enemy) -> () {
        // Timers advance faster with attack-speed bonuses.
        let speedup_time = time * (1.0 + data.state.atk_spd / 100.0);
        self.ca_timer.update(speedup_time, event == &self.ca_0);
        self.na.update(speedup_time, event, data, attack, particles, enemy);
        // Seal bookkeeping only for this character's own events:
        // NA grants 1, skill grants 3, CA consumes all.
        if event.idx == self.burst.attack.idx {
            match &event.kind {
                Na => self.scarlet_seal += 1,
                Ca => self.scarlet_seal = 0,
                PressSkill => self.scarlet_seal += 3,
                _ => (),
            }
        }
        // While the burst is active (ticks 1..=15), each tick grants a seal.
        if self.burst.timer.ping && 0 < self.burst.timer.n && self.burst.timer.n <= 15 {
            self.scarlet_seal += 1;
        }
    }
    fn additional_attack(&self, atk_queue: &mut Vec<*const Attack>, particles: &mut Vec<FieldEnergy>, data: &CharacterData) -> () {
        self.na.additional_attack(atk_queue, particles, data);
        // When a CA just fired, queue the A4 extra hit plus the variant that
        // matches the seal count. NOTE(review): counts > 4 queue no CA damage
        // — presumably seals never exceed 4 here; confirm upstream capping.
        if self.ca_timer.ping && self.ca_timer.n == 1 {
            atk_queue.push(&self.a4_blazing_eye);
            match self.scarlet_seal {
                0 => atk_queue.push(&self.ca_0),
                1 => atk_queue.push(&self.ca_1),
                2 => atk_queue.push(&self.ca_2),
                3 => atk_queue.push(&self.ca_3),
                4 => atk_queue.push(&self.ca_4),
                _ => (),
            }
        }
    }
    fn modify(&self, modifiable_data: &mut [CharacterData], enemy: &mut Enemy) -> () {
        // Burst uptime buff: flat CA damage bonus on this character.
        if 1 <= self.burst.timer.n && self.burst.timer.n < 16 {
            let state = &mut modifiable_data[self.burst.attack.idx.0].state;
            state.ca_dmg += 54.4;
        }
    }
    fn reset(&mut self) -> () {
        // Timers are reset elsewhere; only the seal count lives on this struct.
        self.scarlet_seal = 0;
    }
}
#[derive(Debug)]
pub struct EulaSkill {
    // Grimheart stack count: +1 on press/burst, cleared by hold (see `update`).
    grimheart: usize,
    // Cooldowns: 4 s for press, 10 s for hold (set in `new`).
    press_timer: NTimer,
    hold_timer: NTimer,
    press: Attack,
    hold: Attack,
    // Icewhirl Brand hits consumed per Grimheart stack on hold release.
    icewhirl_brand_1: Attack,
    icewhirl_brand_2: Attack,
    // Extra physical hit fired alongside a 2-stack hold (A1-related multiplier).
    hold_a1: Attack,
    press_particle: Particle,
    hold_particle: Particle,
}
impl EulaSkill {
    /// Builds Eula's skill state for the character at `idx`, sharing the
    /// skill ICD timer across all skill-tagged attacks.
    pub fn new(idx: FieldCharacterIndex, icd_timer: &ICDTimers) -> Self {
        Self {
            grimheart: 0,
            // Press: 4 s cooldown; hold: 10 s cooldown.
            press_timer: NTimer::new(&[4.0]),
            hold_timer: NTimer::new(&[10.0]),
            press: Attack {
                kind: AttackType::PressSkill,
                element: &CRYO_GAUGE1A,
                multiplier: 263.52,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            },
            hold: Attack {
                kind: AttackType::HoldSkill,
                element: &CRYO_GAUGE1A,
                multiplier: 442.08,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            },
            // One Icewhirl hit per consumed Grimheart stack: same multiplier,
            // differing only in hit count.
            icewhirl_brand_1: Attack {
                kind: AttackType::SkillDot,
                element: &CRYO_GAUGE1A,
                multiplier: 172.8,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            },
            icewhirl_brand_2: Attack {
                kind: AttackType::SkillDot,
                element: &CRYO_GAUGE1A,
                multiplier: 172.8,
                hits: 2,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            },
            // Half of the Lightfall Sword multiplier, dealt as physical damage.
            hold_a1: Attack {
                kind: AttackType::SkillDot,
                element: &PHYSICAL_GAUGE,
                multiplier: 725.56 * 0.5,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.skill),
                idx,
            },
            press_particle: Particle::new(Cryo, 1.5),
            hold_particle: Particle::new(Cryo, 2.5),
        }
    }
}
impl SkillAbility for EulaSkill {
    /// Applies the timer transformation `f` (e.g. a cooldown-reduction
    /// effect) to both skill cooldowns: press first, then hold.
    fn accelerate(&mut self, f: fn(&mut NTimer)) -> () {
        let Self { press_timer, hold_timer, .. } = self;
        f(press_timer);
        f(hold_timer);
    }
}
impl SpecialAbility for EulaSkill {
    // At 2 Grimheart stacks, propose the hold variant (consuming the stacks);
    // otherwise propose a press to build stacks.
    fn maybe_attack(&self, _data: &CharacterData) -> Option<AttackEvent> {
        if self.grimheart == 2 {
            self.hold.to_event(&self.hold_timer)
        } else {
            self.press.to_event(&self.press_timer)
        }
    }
    fn update(&mut self, time: f32, event: &AttackEvent, data: &CharacterData, _attack: &[*const Attack], _particles: &[FieldEnergy], _enemy: &Enemy) -> () {
        // `press.idx` doubles as this character's field index; only react to
        // our own events, but always advance both cooldown timers.
        if event.idx == self.press.idx {
            match &event.kind {
                PressSkill => {
                    self.grimheart += 1;
                    self.press_timer.update(time, true),
                    self.hold_timer.update(time, false);
                },
                HoldSkill => {
                    self.grimheart = 0;
                    self.press_timer.update(time, false);
                    self.hold_timer.update(time, true);
                },
                // Casting the burst grants a stack and resets both skill
                // cooldowns.
                Burst => {
                    self.grimheart += 1;
                    self.press_timer.reset();
                    self.hold_timer.reset();
                },
                _ => {
                    self.press_timer.update(time, false);
                    self.hold_timer.update(time, false);
                },
            }
        } else {
            self.press_timer.update(time, false);
            self.hold_timer.update(time, false);
        }
    }
    fn additional_attack(&self, atk_queue: &mut Vec<*const Attack>, particles: &mut Vec<FieldEnergy>, _data: &CharacterData) -> () {
        if self.press_timer.ping && self.press_timer.n == 1 {
            atk_queue.push(&self.press);
            particles.push_p(self.press_particle);
        }
        // On hold release, also queue the Icewhirl hits for the consumed
        // Grimheart stacks (plus the A1 physical hit at 2 stacks).
        // NOTE(review): `update` zeroes `grimheart` on HoldSkill; whether the
        // 1/2-stack arms here ever fire depends on the simulator calling
        // `additional_attack` before `update` — verify the driver's ordering.
        if self.hold_timer.ping && self.hold_timer.n == 1 {
            atk_queue.push(&self.hold);
            particles.push_p(self.hold_particle);
            match self.grimheart {
                1 => atk_queue.push(&self.icewhirl_brand_1),
                2 => {
                    atk_queue.push(&self.icewhirl_brand_2);
                    atk_queue.push(&self.hold_a1);
                },
                _ => (),
            }
        }
    }
    fn reset(&mut self) -> () {
        self.grimheart = 0;
        self.press_timer.reset();
        self.hold_timer.reset();
    }
}
pub struct Eula {
    // Tracks the 7 s cryo/physical resistance shred applied on hold release
    // (see `modify`).
    skill_debuff: DurationTimer,
    // Hits accumulated while the burst is active; converted into the
    // `burst_stack_n` multiplier when the Lightfall Sword detonates.
    lightfall_sword_stack: usize,
    na: NaLoop,
    ca: NoopAbility,
    skill: EulaSkill,
    burst: SimpleBurst,
    // Base Lightfall Sword detonation hit.
    burst_lightfall_sword: Attack,
    // Per-stack detonation bonus; its multiplier is rewritten at detonation
    // time from `lightfall_sword_stack`.
    burst_stack_n: Attack,
}
impl Eula {
    /// Static character data used by the simulator.
    // NOTE(review): Eula shipped in version 1.5 (2021-05-18); the
    // `2021-01-12` release date below looks like a copy/paste from another
    // character — verify against the game data tables.
    pub fn record() -> CharacterRecord {
        CharacterRecord::default()
            .name("Eula").vision(Cryo).weapon(Claymore).release_date("2021-01-12").version(1.5)
            .base_hp(13226.0).base_atk(342.0).base_def(751.0)
            .cd(88.4)
            .energy_cost(80.0)
    }
    /// Builds Eula's runtime ability state for the character at `idx`.
    pub fn new(idx: FieldCharacterIndex, icd_timer: &ICDTimers) -> Self {
        Self {
            skill_debuff: DurationTimer::new(7.0, &[0.0]),
            lightfall_sword_stack: 0,
            na: NaLoop::new(
                // 5 attacks in 3.85 seconds
                &[0.77,0.77,0.77,0.77,0.77],
                vec![
                    Attack::na(177.38, 1, idx, &icd_timer),
                    Attack::na(184.93, 1, idx, &icd_timer),
                    Attack::na(112.28, 2, idx, &icd_timer),
                    Attack::na(222.67, 1, idx, &icd_timer),
                    Attack::na(142.0, 2, idx, &icd_timer),
                ]
            ),
            ca: NoopAbility,
            skill: EulaSkill::new(idx, icd_timer),
            // Burst timer: 7 s active window, then 13 s of remaining cooldown.
            burst: SimpleBurst::new(&[7.0, 13.0], Attack {
                kind: AttackType::Burst,
                element: &CRYO_GAUGE2B,
                multiplier: 617.44,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.burst),
                idx,
            }),
            burst_lightfall_sword: Attack {
                kind: AttackType::BurstDot,
                element: &PHYSICAL_GAUGE,
                multiplier: 725.56,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.burst),
                idx,
            },
            // Multiplier is filled in at detonation time (148.24 per stack).
            burst_stack_n: Attack {
                kind: AttackType::BurstDot,
                element: &PHYSICAL_GAUGE,
                multiplier: 0.0,
                hits: 1,
                icd_timer: Rc::clone(&icd_timer.burst),
                idx,
            },
        }
    }
}
// Standard delegation of Eula's sub-abilities to the character interface;
// the charged attack is a no-op for this model.
impl CharacterAbility for Eula {
    fn na_ref(&self) -> &dyn SpecialAbility { &self.na }
    fn ca_ref(&self) -> &dyn SpecialAbility { &self.ca }
    fn skill_ref(&self) -> &dyn SkillAbility { &self.skill }
    fn burst_ref(&self) -> &dyn SpecialAbility { &self.burst }
    fn na_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.na }
    fn ca_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.ca }
    fn skill_mut(&mut self) -> &mut dyn SkillAbility { &mut self.skill }
    fn burst_mut(&mut self) -> &mut dyn SpecialAbility { &mut self.burst }
}
impl SpecialAbility for Eula {
    fn update(&mut self, time: f32, event: &AttackEvent, data: &CharacterData, attack: &[*const Attack], particles: &[FieldEnergy], enemy: &Enemy) -> () {
        // Start the 7 s resistance-shred window when a hold skill lands.
        self.skill_debuff.update(time, self.skill.hold_timer.ping && self.skill.hold_timer.n == 1);
        // accumulate stacks
        // While the burst is in its active segment (n == 1), count every hit
        // this character lands toward the Lightfall Sword detonation.
        if self.burst.timer.n == 1 {
            // SAFETY-NOTE(review): dereferences raw attack pointers; assumed
            // valid for the duration of this update by the simulator's
            // contract — not verifiable from this file.
            unsafe {
                for &a in attack {
                    let atk = & *a;
                    if atk.idx != data.idx {
                        continue;
                    }
                    match &atk.kind {
                        Na | Ca | PressSkill | HoldSkill | SkillDot | Burst => self.lightfall_sword_stack += atk.hits,
                        _ => (),
                    };
                }
            }
        }
        // On transition into the cooldown segment (n == 2), convert the
        // accumulated stacks into the detonation bonus multiplier.
        if self.burst.timer.ping && self.burst.timer.n == 2 {
            self.burst_stack_n.multiplier = 148.24 * self.lightfall_sword_stack as f32;
            self.lightfall_sword_stack = 0;
        }
    }
    // Queue the Lightfall Sword detonation (base hit + stack bonus) exactly
    // once, at the same transition that computed the stack multiplier.
    fn additional_attack(&self, atk_queue: &mut Vec<*const Attack>, particles: &mut Vec<FieldEnergy>, data: &CharacterData) -> () {
        if self.burst.timer.ping && self.burst.timer.n == 2 {
            atk_queue.push(&self.burst_lightfall_sword);
            atk_queue.push(&self.burst_stack_n);
        }
    }
    // Toggle the 25% cryo + 25% physical resistance shred with the debuff
    // timer's edge transitions (n: 1 = entered window, 0 = expired).
    fn modify(&self, modifiable_data: &mut [CharacterData], enemy: &mut Enemy) -> () {
        if self.skill_debuff.ping {
            match self.skill_debuff.n {
                1 => { enemy.debuff.cryo += 25.0; enemy.debuff.physical += 25.0 },
                0 => { enemy.debuff.cryo -= 25.0; enemy.debuff.physical -= 25.0 },
                _ => (),
            }
        }
    }
    // NOTE(review): `skill_debuff` is not reset here — verify whether the
    // simulator resets `DurationTimer`s elsewhere between runs.
    fn reset(&mut self) -> () {
        self.lightfall_sword_stack = 0;
    }
}
| 35.715884 | 320 | 0.507047 |
01b74c53dae8d4503f74ed06bee2fba5962b0456 | 2,820 | // The wit_bindgen_wasmtime::import below is triggering this lint.
#![allow(clippy::needless_question_mark)]
use anyhow::Result;
use spin_engine::{Builder, ExecutionContextConfiguration};
use spin_manifest::{CoreComponent, ModuleSource, WasmConfig};
use std::{sync::Arc, time::Duration};
use tokio::task::spawn_blocking;
wit_bindgen_wasmtime::import!("spin-timer.wit");
type ExecutionContext = spin_engine::ExecutionContext<spin_timer::SpinTimerData>;
#[tokio::main]
async fn main() -> Result<()> {
    // Log filtering is controlled through the standard RUST_LOG env variable.
    tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .init();
    // Fire the test component once per second; `run` loops forever and only
    // returns on error.
    let trigger = TimerTrigger::new(Duration::from_secs(1), component()).await?;
    trigger.run().await
}
/// A custom timer trigger that executes the
/// first component of an application on every interval.
#[derive(Clone)]
pub struct TimerTrigger {
    /// The interval at which the component is executed.
    pub interval: Duration,
    /// The Spin execution context. Held behind an `Arc` so that clones of
    /// the trigger share a single engine instance.
    engine: Arc<ExecutionContext>,
}
impl TimerTrigger {
    /// Creates a new trigger by building a default execution context around
    /// the single given component.
    pub async fn new(interval: Duration, component: CoreComponent) -> Result<Self> {
        let config = ExecutionContextConfiguration {
            components: vec![component],
            label: "timer-app".to_string(),
            ..Default::default()
        };
        let engine = Arc::new(Builder::build_default(config).await?);
        log::debug!("Created new Timer trigger.");
        Ok(Self { interval, engine })
    }
    /// Runs the trigger at every interval. Loops forever; only returns if a
    /// component invocation fails.
    pub async fn run(&self) -> Result<()> {
        let mut interval = tokio::time::interval(self.interval);
        loop {
            interval.tick().await;
            // NOTE(review): the strftime pattern below has unbalanced
            // brackets (produces e.g. `2022-01-01][12:00:00`); presumably
            // meant to be `[%Y-%m-%d][%H:%M:%S]` — confirm before changing,
            // since the string is forwarded to the component.
            self.handle(
                chrono::Local::now()
                    .format("%Y-%m-%d][%H:%M:%S")
                    .to_string(),
            )
            .await?;
        }
    }
    /// Execute the first component in the application configuration, passing
    /// `msg` as the timer-request payload and logging the component's reply.
    async fn handle(&self, msg: String) -> Result<()> {
        let (mut store, instance) = self.engine.prepare_component(
            &self.engine.config.components[0].id,
            None,
            None,
            None,
            None,
        )?;
        // The wasm call is synchronous, so run it on the blocking thread pool
        // to avoid stalling the async runtime.
        let res = spawn_blocking(move || -> Result<String> {
            let t = spin_timer::SpinTimer::new(&mut store, &instance, |host| {
                host.data.as_mut().unwrap()
            })?;
            Ok(t.handle_timer_request(&mut store, &msg)?)
        })
        .await??;
        log::info!("{}\n", res);
        Ok(())
    }
}
pub fn component() -> CoreComponent {
CoreComponent {
source: ModuleSource::FileReference("target/test-programs/echo.wasm".into()),
id: "test".to_string(),
wasm: WasmConfig::default(),
}
}
| 30.652174 | 85 | 0.595745 |
6484d5c029aa3f97ef01f713cc576f4a95258c25 | 4,343 | #![crate_name = "uu_nohup"]
/*
* This file is part of the uutils coreutils package.
*
* (c) 2014 Vsevolod Velichko <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use libc::{c_char, signal, dup2, execvp};
use libc::{SIG_IGN, SIGHUP};
use std::ffi::CString;
use std::fs::{File, OpenOptions};
use std::io::{Error, Write};
use std::os::unix::prelude::*;
use std::path::{Path, PathBuf};
use std::env;
use uucore::fs::{is_stderr_interactive, is_stdin_interactive, is_stdout_interactive};
static NAME: &'static str = "nohup";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
#[cfg(target_os = "macos")]
extern {
fn _vprocmgr_detach_from_console(flags: u32) -> *const libc::c_int;
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
unsafe fn _vprocmgr_detach_from_console(_: u32) -> *const libc::c_int { std::ptr::null() }
/// Entry point: parse flags, detach from terminal/SIGHUP, then replace this
/// process image with COMMAND via `execvp`. Returns an exit code only on
/// failure paths (exec success never returns).
pub fn uumain(args: Vec<String>) -> i32 {
    let mut opts = getopts::Options::new();
    opts.optflag("h", "help", "Show help and exit");
    opts.optflag("V", "version", "Show version and exit");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(f) => {
            show_error!("{}", f);
            show_usage(&opts);
            return 1
        }
    };
    if matches.opt_present("V") { println!("{} {}", NAME, VERSION); return 0 }
    if matches.opt_present("h") { show_usage(&opts); return 0 }
    if matches.free.is_empty() {
        show_error!("Missing operand: COMMAND");
        println!("Try `{} --help` for more information.", NAME);
        return 1
    }
    // Redirect interactive stdio, then ignore hangups so the child survives
    // terminal disconnect.
    replace_fds();
    unsafe { signal(SIGHUP, SIG_IGN) };
    if unsafe { _vprocmgr_detach_from_console(0) } != std::ptr::null() { crash!(2, "Cannot detach from console")};
    // Build a NULL-terminated argv of C strings for execvp. `cstrs` must
    // outlive the pointer vector, which borrows from it.
    let cstrs: Vec<CString> = matches.free.iter().map(|x| CString::new(x.as_bytes()).unwrap()).collect();
    let mut args: Vec<*const c_char> = cstrs.iter().map(|s| s.as_ptr()).collect();
    args.push(std::ptr::null());
    unsafe { execvp(args[0], args.as_mut_ptr())}
}
/// Redirects any stdio stream that is attached to a terminal:
/// stdin <- /dev/null, stdout -> nohup.out (or $HOME/nohup.out),
/// stderr -> whatever stdout now points at. Crashes (exit 2) on failure.
fn replace_fds() {
    if is_stdin_interactive() {
        let new_stdin = match File::open(Path::new("/dev/null")) {
            Ok(t) => t,
            Err(e) => {
                crash!(2, "Cannot replace STDIN: {}", e)
            }
        };
        // dup2 onto fd 0 returns the target fd (0) on success.
        if unsafe { dup2(new_stdin.as_raw_fd(), 0) } != 0 {
            crash!(2, "Cannot replace STDIN: {}", Error::last_os_error())
        }
    }
    if is_stdout_interactive() {
        let new_stdout = find_stdout();
        let fd = new_stdout.as_raw_fd();
        if unsafe { dup2(fd, 1) } != 1 {
            crash!(2, "Cannot replace STDOUT: {}", Error::last_os_error())
        }
    }
    // Point stderr at the (possibly redirected) stdout.
    if is_stderr_interactive() {
        if unsafe { dup2(1, 2) } != 2 {
            crash!(2, "Cannot replace STDERR: {}", Error::last_os_error())
        }
    }
}
/// Opens the output file used to replace an interactive stdout: `nohup.out`
/// in the current directory, falling back to `$HOME/nohup.out`. Appends to
/// an existing file; crashes (exit 2) if neither location can be opened.
fn find_stdout() -> File {
    match OpenOptions::new().write(true).create(true).append(true).open(Path::new("nohup.out")) {
        Ok(t) => {
            show_warning!("Output is redirected to: nohup.out");
            t
        },
        Err(e) => {
            // Fall back to $HOME; note the error reported on a missing HOME
            // is the original open error `e`, matching GNU nohup behavior.
            let home = match env::var("HOME") {
                Err(_) => crash!(2, "Cannot replace STDOUT: {}", e),
                Ok(h) => h
            };
            let mut homeout = PathBuf::from(home);
            homeout.push("nohup.out");
            match OpenOptions::new().write(true).create(true).append(true).open(&homeout) {
                Ok(t) => {
                    show_warning!("Output is redirected to: {:?}", homeout);
                    t
                },
                Err(e) => {
                    crash!(2, "Cannot replace STDOUT: {}", e)
                }
            }
        }
    }
}
/// Prints the usage/help text (with the options summary supplied by
/// `getopts`) to stdout.
fn show_usage(opts: &getopts::Options) {
    let msg = format!("{0} {1}
Usage:
{0} COMMAND [ARG]...
{0} OPTION
Run COMMAND ignoring hangup signals.
If standard input is terminal, it'll be replaced with /dev/null.
If standard output is terminal, it'll be appended to nohup.out instead,
or $HOME/nohup.out, if nohup.out open failed.
If standard error is terminal, it'll be redirected to stdout.", NAME, VERSION);
    print!("{}", opts.usage(&msg));
}
| 30.159722 | 114 | 0.566889 |
1d1be04fd0a70f97ef246a29dc900243cc113d44 | 478 | use amethyst::Error;
use stdio_command_model::{StdioCommandEvent, StdioCommandEventArgs};
use stdio_spi::StdinMapper;
/// Builds a `StdioCommandEvent` from stdin tokens.
#[derive(Debug)]
pub struct StdioCommandEventStdinMapper;
impl StdinMapper for StdioCommandEventStdinMapper {
    // No ECS system data is needed to build the event.
    type SystemData = ();
    type Event = StdioCommandEvent;
    type Args = StdioCommandEventArgs;
    /// Passes the parsed arguments straight through — the args type aliases
    /// the event type, so no conversion is required.
    fn map(_: &(), args: Self::Args) -> Result<Self::Event, Error> {
        Ok(args)
    }
}
0abf7f65c9dfddcb21f50dfd057248181e6c7dec | 1,147 | #[doc = "Reader of register PK_PINLOCKN"]
pub type R = crate::R<u32, super::PK_PINLOCKN>;
#[doc = "Writer for register PK_PINLOCKN"]
pub type W = crate::W<u32, super::PK_PINLOCKN>;
#[doc = "Register PK_PINLOCKN `reset()`'s with value 0xffff"]
// svd2rust-generated reset definition: on reset all 16 PINLOCKN bits are set
// (per the field doc, all pins unlocked).
impl crate::ResetValue for super::PK_PINLOCKN {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0xffff
    }
}
#[doc = "Reader of field `PINLOCKN`"]
pub type PINLOCKN_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `PINLOCKN`"]
pub struct PINLOCKN_W<'a> {
w: &'a mut W,
}
impl<'a> PINLOCKN_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Clear the 16-bit field, then OR in the (masked) new value.
        const MASK: u32 = 0xffff;
        self.w.bits = (self.w.bits & !MASK) | (u32::from(value) & MASK);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - Unlocked Pins"]
    #[inline(always)]
    pub fn pinlockn(&self) -> PINLOCKN_R {
        // Extract the low 16 bits of the register value.
        let raw = (self.bits & 0xffff) as u16;
        PINLOCKN_R::new(raw)
    }
}
impl W {
    #[doc = "Bits 0:15 - Unlocked Pins"]
    #[inline(always)]
    // Returns a write proxy borrowing this writer; the field value is then
    // set through `PINLOCKN_W::bits`.
    pub fn pinlockn(&mut self) -> PINLOCKN_W {
        PINLOCKN_W { w: self }
    }
}
| 27.97561 | 74 | 0.592851 |
ef89acc3c1a7a2ec22da30fc29b1f04329d2fe56 | 330 | use std::{fs, io, path::Path};
/// Ensure a directory exists and is empty
pub fn ensure_dir_exists_fresh(dir: &Path) -> io::Result<()> {
    // Wipe any previous contents so the caller always starts from an
    // empty directory.
    if dir.exists() {
        fs::remove_dir_all(dir)?;
    }
    fs::create_dir_all(dir)?;
    debug!("Ensuring fresh dir: {}", dir.display());
    Ok(())
}
| 23.571429 | 62 | 0.575758 |
91a3dde98564f9b03067b4c6b7027277fa6f2019 | 3,343 | use lock::Mutex;
use riscv::register::sie;
use crate::prelude::IrqHandler;
use crate::scheme::{IrqScheme, Scheme};
use crate::{DeviceError, DeviceResult};
use alloc::format;
use alloc::string::String;
use core::sync::atomic::{AtomicU8, Ordering};
const S_SOFT: usize = 1;
const S_TIMER: usize = 5;
const S_EXT: usize = 9;
static INTC_NUM: AtomicU8 = AtomicU8::new(0);
#[repr(usize)]
pub enum ScauseIntCode {
SupervisorSoft = S_SOFT,
SupervisorTimer = S_TIMER,
SupervisorExternal = S_EXT,
}
pub struct Intc {
    /// Unique per-CPU device name, e.g. `riscv-intc-cpu0`.
    name: String,
    /// Handler slot for supervisor software interrupts (SCAUSE code 1).
    soft_handler: Mutex<Option<IrqHandler>>,
    /// Handler slot for supervisor timer interrupts (SCAUSE code 5).
    timer_handler: Mutex<Option<IrqHandler>>,
    /// Handler slot for supervisor external interrupts (SCAUSE code 9).
    ext_handler: Mutex<Option<IrqHandler>>,
}
impl Intc {
    /// Creates a per-CPU interrupt controller; each instance receives a
    /// unique name derived from a global counter.
    pub fn new() -> Self {
        let cpu_id = INTC_NUM.fetch_add(1, Ordering::Relaxed);
        Self {
            name: format!("riscv-intc-cpu{}", cpu_id),
            soft_handler: Mutex::new(None),
            timer_handler: Mutex::new(None),
            ext_handler: Mutex::new(None),
        }
    }
    /// Runs `op` on the handler slot selected by the SCAUSE interrupt code,
    /// logging and returning an error for any unrecognized cause.
    fn with_handler<F>(&self, cause: usize, op: F) -> DeviceResult
    where
        F: FnOnce(&mut Option<IrqHandler>) -> DeviceResult,
    {
        let slot = match cause {
            S_SOFT => &self.soft_handler,
            S_TIMER => &self.timer_handler,
            S_EXT => &self.ext_handler,
            _ => {
                error!("invalid SCAUSE value {:#x}!", cause);
                return Err(DeviceError::InvalidParam);
            }
        };
        op(&mut slot.lock())
    }
}
impl Default for Intc {
    // Delegates to `new`, which also advances the global per-CPU counter.
    fn default() -> Self {
        Self::new()
    }
}
impl Scheme for Intc {
    fn name(&self) -> &str {
        self.name.as_str()
    }
    /// Dispatches a pending interrupt to the handler registered for `cause`,
    /// logging a warning when no handler is installed.
    fn handle_irq(&self, cause: usize) {
        self.with_handler(cause, |opt| {
            if let Some(h) = opt {
                h();
            } else {
                warn!("no registered handler for SCAUSE {}!", cause);
            }
            Ok(())
        })
        // `with_handler` only fails for an invalid cause value, which would
        // be a caller bug here.
        .unwrap();
    }
}
impl IrqScheme for Intc {
    fn is_valid_irq(&self, cause: usize) -> bool {
        matches!(cause, S_SOFT | S_TIMER | S_EXT)
    }
    /// Disables the interrupt source by clearing its bit in the `sie` CSR.
    fn mask(&self, cause: usize) -> DeviceResult {
        // SAFETY: writes the supervisor interrupt-enable CSR; presumed to be
        // called from S-mode kernel context (not verifiable from this file).
        unsafe {
            match cause {
                S_SOFT => sie::clear_ssoft(),
                S_TIMER => sie::clear_stimer(),
                S_EXT => sie::clear_sext(),
                _ => return Err(DeviceError::InvalidParam),
            }
        }
        Ok(())
    }
    /// Enables the interrupt source by setting its bit in the `sie` CSR.
    fn unmask(&self, cause: usize) -> DeviceResult {
        // SAFETY: see `mask`.
        unsafe {
            match cause {
                S_SOFT => sie::set_ssoft(),
                S_TIMER => sie::set_stimer(),
                S_EXT => sie::set_sext(),
                _ => return Err(DeviceError::InvalidParam),
            }
        }
        Ok(())
    }
    /// Installs `handler` for `cause`; fails if a handler is already present.
    fn register_handler(&self, cause: usize, handler: IrqHandler) -> DeviceResult {
        self.with_handler(cause, |opt| {
            if opt.is_some() {
                Err(DeviceError::AlreadyExists)
            } else {
                *opt = Some(handler);
                Ok(())
            }
        })
    }
    /// Removes the handler for `cause`; fails if none was registered.
    fn unregister(&self, cause: usize) -> DeviceResult {
        self.with_handler(cause, |opt| {
            if opt.is_some() {
                *opt = None;
                Ok(())
            } else {
                Err(DeviceError::InvalidParam)
            }
        })
    }
}
| 25.325758 | 88 | 0.507628 |
fbe1c1109f6384abbed278026fe25898e22b9cb5 | 80,839 | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use std::{
cell::RefCell,
collections::{BTreeMap, BTreeSet, LinkedList},
};
use itertools::Itertools;
use num::{BigInt, BigUint, FromPrimitive, Num};
use move_core_types::value::MoveValue;
use move_ir_types::location::Spanned;
use move_lang::{
expansion::ast as EA, hlir::ast as HA, naming::ast as NA, parser::ast as PA, shared::Name,
};
use crate::{
ast::{Exp, LocalVarDecl, ModuleName, Operation, QualifiedSymbol, QuantKind, Value},
builder::{
model_builder::{ConstEntry, LocalVarEntry, SpecFunEntry},
module_builder::ModuleBuilder,
},
model::{FieldId, Loc, ModuleEnv, ModuleId, NodeId, QualifiedId, SpecFunId, StructId},
symbol::{Symbol, SymbolPool},
ty::{PrimitiveType, Substitution, Type, TypeDisplayContext, BOOL_TYPE},
};
#[derive(Debug)]
pub(crate) struct ExpTranslator<'env, 'translator, 'module_translator> {
pub parent: &'module_translator mut ModuleBuilder<'env, 'translator>,
/// A symbol table for type parameters.
pub type_params_table: BTreeMap<Symbol, Type>,
/// Type parameters in sequence they have been added.
pub type_params: Vec<(Symbol, Type)>,
/// A scoped symbol table for local names. The first element in the list contains the most
/// inner scope.
pub local_table: LinkedList<BTreeMap<Symbol, LocalVarEntry>>,
/// When compiling a condition, the result type of the function the condition is associated
/// with.
pub result_type: Option<Type>,
/// Status for the `old(...)` expression form.
pub old_status: OldExpStatus,
/// The currently build type substitution.
pub subs: Substitution,
/// A counter for generating type variables.
pub type_var_counter: u16,
/// A marker to indicate the node_counter start state.
pub node_counter_start: usize,
/// The locals which have been accessed with this build. The boolean indicates whether
/// they ore accessed in `old(..)` context.
pub accessed_locals: BTreeSet<(Symbol, bool)>,
/// The number of outer context scopes in `local_table` which are accounted for in
/// `accessed_locals`. See also documentation of function `mark_context_scopes`.
pub outer_context_scopes: usize,
/// A boolean indicating whether we are translating a let expression
pub in_let: bool,
/// A flag to indicate whether we are translating expressions in a spec fun.
pub translating_fun_as_spec_fun: bool,
/// A flag to indicate whether errors have been generated so far.
pub errors_generated: RefCell<bool>,
/// Set containing all the functions called during translation.
pub called_spec_funs: BTreeSet<(ModuleId, SpecFunId)>,
}
#[derive(Debug, PartialEq)]
pub(crate) enum OldExpStatus {
NotSupported,
OutsideOld,
InsideOld,
}
/// # General
impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'module_translator> {
pub fn new(parent: &'module_translator mut ModuleBuilder<'env, 'translator>) -> Self {
let node_counter_start = parent.parent.env.next_free_node_number();
Self {
parent,
type_params_table: BTreeMap::new(),
type_params: vec![],
local_table: LinkedList::new(),
result_type: None,
old_status: OldExpStatus::NotSupported,
subs: Substitution::new(),
type_var_counter: 0,
node_counter_start,
accessed_locals: BTreeSet::new(),
outer_context_scopes: 0,
in_let: false,
/// Following flags used to translate pure Move functions.
translating_fun_as_spec_fun: false,
errors_generated: RefCell::new(false),
called_spec_funs: BTreeSet::new(),
}
}
pub fn set_in_let(mut self) -> Self {
self.in_let = true;
self
}
pub fn translate_fun_as_spec_fun(&mut self) {
self.translating_fun_as_spec_fun = true;
}
pub fn new_with_old(
parent: &'module_translator mut ModuleBuilder<'env, 'translator>,
allow_old: bool,
) -> Self {
let mut et = ExpTranslator::new(parent);
if allow_old {
et.old_status = OldExpStatus::OutsideOld;
} else {
et.old_status = OldExpStatus::NotSupported;
};
et
}
/// Extract a map from names to types from the scopes of this build.
pub fn extract_var_map(&self) -> BTreeMap<Symbol, LocalVarEntry> {
let mut vars: BTreeMap<Symbol, LocalVarEntry> = BTreeMap::new();
for s in &self.local_table {
vars.extend(s.clone());
}
vars
}
// Get type parameters from this build.
pub fn get_type_params(&self) -> Vec<Type> {
self.type_params
.iter()
.map(|(_, t)| t.clone())
.collect_vec()
}
// Get type parameters with names from this build.
pub fn get_type_params_with_name(&self) -> Vec<(Symbol, Type)> {
self.type_params.clone()
}
/// Shortcut for accessing symbol pool.
pub fn symbol_pool(&self) -> &SymbolPool {
self.parent.parent.env.symbol_pool()
}
/// Shortcut for translating a Move AST location into ours.
pub fn to_loc(&self, loc: &move_ir_types::location::Loc) -> Loc {
self.parent.parent.env.to_loc(loc)
}
/// Shortcut for reporting an error.
pub fn error(&self, loc: &Loc, msg: &str) {
if self.translating_fun_as_spec_fun {
*self.errors_generated.borrow_mut() = true;
} else {
self.parent.parent.error(loc, msg);
}
}
/// Creates a fresh type variable.
fn fresh_type_var(&mut self) -> Type {
let var = Type::Var(self.type_var_counter);
self.type_var_counter += 1;
var
}
/// Shortcut to create a new node id.
fn new_node_id(&self) -> NodeId {
self.parent.parent.env.new_node_id()
}
/// Shortcut to create a new node id and assigns type and location to it.
pub fn new_node_id_with_type_loc(&self, ty: &Type, loc: &Loc) -> NodeId {
self.parent.parent.env.new_node(loc.clone(), ty.clone())
}
// Short cut for getting node type.
pub fn get_node_type(&self, node_id: NodeId) -> Type {
self.parent.parent.env.get_node_type(node_id)
}
// Short cut for getting node type.
fn get_node_type_opt(&self, node_id: NodeId) -> Option<Type> {
self.parent.parent.env.get_node_type_opt(node_id)
}
// Short cut for getting node location.
#[allow(dead_code)]
fn get_node_loc(&self, node_id: NodeId) -> Loc {
self.parent.parent.env.get_node_loc(node_id)
}
// Short cut for getting node instantiation.
fn get_node_instantiation_opt(&self, node_id: NodeId) -> Option<Vec<Type>> {
self.parent.parent.env.get_node_instantiation_opt(node_id)
}
/// Shortcut to update node type.
pub fn update_node_type(&self, node_id: NodeId, ty: Type) {
self.parent.parent.env.update_node_type(node_id, ty);
}
/// Shortcut to set/update instantiation for the given node id.
fn set_node_instantiation(&self, node_id: NodeId, instantiation: Vec<Type>) {
self.parent
.parent
.env
.set_node_instantiation(node_id, instantiation);
}
fn update_node_instantiation(&self, node_id: NodeId, instantiation: Vec<Type>) {
self.parent
.parent
.env
.update_node_instantiation(node_id, instantiation);
}
/// Finalizes types in this build, producing errors if some could not be inferred
/// and remained incomplete.
pub fn finalize_types(&mut self) {
if self.parent.parent.env.has_errors() {
// Don't do that check if we already reported errors, as this would produce
// useless followup errors.
return;
}
for i in self.node_counter_start..self.parent.parent.env.next_free_node_number() {
let node_id = NodeId::new(i);
if let Some(ty) = self.get_node_type_opt(node_id) {
let ty = self.finalize_type(node_id, &ty);
self.update_node_type(node_id, ty);
}
if let Some(inst) = self.get_node_instantiation_opt(node_id) {
let inst = inst
.iter()
.map(|ty| self.finalize_type(node_id, ty))
.collect_vec();
self.update_node_instantiation(node_id, inst);
}
}
}
/// Finalize the the given type, producing an error if it is not complete.
fn finalize_type(&self, node_id: NodeId, ty: &Type) -> Type {
let ty = self.subs.specialize(ty);
if ty.is_incomplete() {
// This type could not be fully inferred.
let loc = self.parent.parent.env.get_node_loc(node_id);
self.error(
&loc,
&format!(
"unable to infer type: `{}`",
ty.display(&self.type_display_context())
),
);
}
ty
}
/// Fix any free type variables remaining in this expression build to a freshly
/// generated type parameter, adding them to the passed vector.
pub fn fix_types(&mut self, generated_params: &mut Vec<Type>) {
if self.parent.parent.env.has_errors() {
return;
}
for i in self.node_counter_start..self.parent.parent.env.next_free_node_number() {
let node_id = NodeId::new(i);
if let Some(ty) = self.get_node_type_opt(node_id) {
let ty = self.fix_type(generated_params, &ty);
self.update_node_type(node_id, ty);
}
if let Some(inst) = self.get_node_instantiation_opt(node_id) {
let inst = inst
.iter()
.map(|ty| self.fix_type(generated_params, ty))
.collect_vec();
self.update_node_instantiation(node_id, inst);
}
}
}
/// Fix the given type, replacing any remaining free type variables with a type parameter.
fn fix_type(&mut self, generated_params: &mut Vec<Type>, ty: &Type) -> Type {
// First specialize the type.
let ty = self.subs.specialize(ty);
// Next get whatever free variables remain.
let vars = ty.get_vars();
// Assign a type parameter to each free variable and add it to substitution.
for var in vars {
let type_param = Type::TypeParameter(generated_params.len() as u16);
generated_params.push(type_param.clone());
self.subs.bind(var, type_param);
}
// Return type with type parameter substitution applied.
self.subs.specialize(&ty)
}
/// Constructs a type display context used to visualize types in error messages.
fn type_display_context(&self) -> TypeDisplayContext<'_> {
TypeDisplayContext::WithoutEnv {
symbol_pool: self.symbol_pool(),
reverse_struct_table: &self.parent.parent.reverse_struct_table,
}
}
/// Creates an error expression.
pub fn new_error_exp(&mut self) -> Exp {
let id =
self.new_node_id_with_type_loc(&Type::Error, &self.parent.parent.env.internal_loc());
Exp::Invalid(id)
}
/// Enters a new scope in the locals table.
pub fn enter_scope(&mut self) {
self.local_table.push_front(BTreeMap::new());
}
/// Exits the most inner scope of the locals table.
pub fn exit_scope(&mut self) {
self.local_table.pop_front();
}
/// Mark the current active scope level as context, i.e. symbols which are not
/// declared in this expression. This is used to determine what
/// `get_accessed_context_locals` returns.
pub fn mark_context_scopes(mut self) -> Self {
self.outer_context_scopes = self.local_table.len();
self
}
/// Gets the locals this build has accessed so far and which belong to the
/// context, i.a. are not declared in this expression.
pub fn get_accessed_context_locals(&self) -> Vec<(Symbol, bool)> {
self.accessed_locals.iter().cloned().collect_vec()
}
/// Defines a type parameter.
pub fn define_type_param(&mut self, loc: &Loc, name: Symbol, ty: Type) {
self.type_params.push((name, ty.clone()));
if self.type_params_table.insert(name, ty).is_some() {
let param_name = name.display(self.symbol_pool());
self.parent
.parent
.error(loc, &format!("duplicate declaration of `{}`", param_name));
}
}
/// Defines a local in the most inner scope. This produces an error
/// if the name already exists. The operation option is used for names
/// which represent special operations.
pub fn define_local(
&mut self,
loc: &Loc,
name: Symbol,
type_: Type,
operation: Option<Operation>,
temp_index: Option<usize>,
) {
let entry = LocalVarEntry {
loc: loc.clone(),
type_,
operation,
temp_index,
};
if let Some(old) = self
.local_table
.front_mut()
.expect("symbol table empty")
.insert(name, entry)
{
let display = name.display(self.symbol_pool());
self.error(loc, &format!("duplicate declaration of `{}`", display));
self.error(&old.loc, &format!("previous declaration of `{}`", display));
}
}
/// Lookup a local in this build.
pub fn lookup_local(&mut self, name: Symbol, in_old: bool) -> Option<&LocalVarEntry> {
let mut depth = self.local_table.len();
for scope in &self.local_table {
if let Some(entry) = scope.get(&name) {
if depth <= self.outer_context_scopes {
// Account for access if this belongs to one of the outer scopes
// considered context (i.e. not declared in this expression).
self.accessed_locals.insert((name, in_old));
}
return Some(entry);
}
depth -= 1;
}
None
}
/// Analyzes the sequence of type parameters as they are provided via the source AST and enters
/// them into the environment. Returns a vector for representing them in the target AST.
pub fn analyze_and_add_type_params<T>(
&mut self,
type_params: &[(Name, T)],
) -> Vec<(Symbol, Type)> {
type_params
.iter()
.enumerate()
.map(|(i, (n, _))| {
let ty = Type::TypeParameter(i as u16);
let sym = self.symbol_pool().make(n.value.as_str());
self.define_type_param(&self.to_loc(&n.loc), sym, ty.clone());
(sym, ty)
})
.collect_vec()
}
/// Analyzes the sequence of function parameters as they are provided via the source AST and
/// enters them into the environment. Returns a vector for representing them in the target AST.
pub fn analyze_and_add_params(
&mut self,
params: &[(PA::Var, EA::Type)],
for_move_fun: bool,
) -> Vec<(Symbol, Type)> {
params
.iter()
.enumerate()
.map(|(idx, (v, ty))| {
let ty = self.translate_type(ty);
let sym = self.symbol_pool().make(v.0.value.as_str());
self.define_local(
&self.to_loc(&v.0.loc),
sym,
ty.clone(),
None,
// If this is for a proper Move function (not spec function), add the
// index so we can resolve this to a `Temporary` expression instead of
// a `LocalVar`.
if for_move_fun { Some(idx) } else { None },
);
(sym, ty)
})
.collect_vec()
}
/// Displays a call target for error messages.
fn display_call_target(&mut self, module: &Option<ModuleName>, name: Symbol) -> String {
if let Some(m) = module {
if m != &self.parent.parent.builtin_module() {
// Only print the module name if it is not the pseudo builtin module.
return format!(
"{}",
QualifiedSymbol {
module_name: m.clone(),
symbol: name,
}
.display(self.symbol_pool())
);
}
}
format!("{}", name.display(self.symbol_pool()))
}
/// Displays a call target candidate for error messages.
fn display_call_cand(
&mut self,
module: &Option<ModuleName>,
name: Symbol,
entry: &SpecFunEntry,
) -> String {
let target = self.display_call_target(module, name);
let type_display_context = self.type_display_context();
format!(
"{}({}): {}",
target,
entry
.arg_types
.iter()
.map(|ty| ty.display(&type_display_context))
.join(", "),
entry.result_type.display(&type_display_context)
)
}
}
/// # Type Translation
impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'module_translator> {
    /// Translates an hlir type into a target AST type.
    pub fn translate_hlir_single_type(&mut self, ty: &HA::SingleType) -> Type {
        use HA::SingleType_::*;
        match &ty.value {
            Ref(is_mut, ty) => {
                let ty = self.translate_hlir_base_type(&*ty);
                // Propagate errors instead of wrapping them into a reference type.
                if ty == Type::Error {
                    Type::Error
                } else {
                    Type::Reference(*is_mut, Box::new(ty))
                }
            }
            Base(ty) => self.translate_hlir_base_type(&*ty),
        }
    }
    /// Translates an hlir base type (type parameter, builtin, or struct
    /// application) into a target AST type.
    fn translate_hlir_base_type(&mut self, ty: &HA::BaseType) -> Type {
        use HA::{BaseType_::*, TypeName_::*};
        use NA::{BuiltinTypeName_::*, TParam};
        match &ty.value {
            Param(TParam {
                user_specified_name,
                ..
            }) => {
                // Type parameters must already be entered in `type_params_table`;
                // indexing panics otherwise (an internal invariant).
                let sym = self.symbol_pool().make(user_specified_name.value.as_str());
                self.type_params_table[&sym].clone()
            }
            Apply(_, type_name, args) => {
                let loc = self.to_loc(&type_name.loc);
                match &type_name.value {
                    Builtin(builtin_type_name) => match &builtin_type_name.value {
                        Address => Type::new_prim(PrimitiveType::Address),
                        Signer => Type::new_prim(PrimitiveType::Signer),
                        U8 => Type::new_prim(PrimitiveType::U8),
                        U64 => Type::new_prim(PrimitiveType::U64),
                        U128 => Type::new_prim(PrimitiveType::U128),
                        Vector => Type::Vector(Box::new(self.translate_hlir_base_type(&args[0]))),
                        Bool => Type::new_prim(PrimitiveType::Bool),
                    },
                    ModuleType(m, n) => {
                        // Resolve a struct type qualified by its module.
                        let module_name = ModuleName::from_str(
                            &m.value.0.to_string(),
                            self.symbol_pool().make(m.value.1.as_str()),
                        );
                        let symbol = self.symbol_pool().make(n.0.value.as_str());
                        let qsym = QualifiedSymbol {
                            module_name,
                            symbol,
                        };
                        let rty = self.parent.parent.lookup_type(&loc, &qsym);
                        if !args.is_empty() {
                            // Replace type instantiation.
                            if let Type::Struct(mid, sid, _) = &rty {
                                let arg_types = self.translate_hlir_base_types(args);
                                // Any error in an argument poisons the whole type.
                                if arg_types.iter().any(|x| *x == Type::Error) {
                                    Type::Error
                                } else {
                                    Type::Struct(*mid, *sid, arg_types)
                                }
                            } else {
                                Type::Error
                            }
                        } else {
                            rty
                        }
                    }
                }
            }
            _ => unreachable!(),
        }
    }
    /// Translates a slice of hlir base types via `translate_hlir_base_type`.
    fn translate_hlir_base_types(&mut self, tys: &[HA::BaseType]) -> Vec<Type> {
        tys.iter()
            .map(|t| self.translate_hlir_base_type(t))
            .collect()
    }
    /// Translates a source AST type into a target AST type.
    pub fn translate_type(&mut self, ty: &EA::Type) -> Type {
        use EA::Type_::*;
        match &ty.value {
            Apply(access, args) => {
                if let EA::ModuleAccess_::Name(n) = &access.value {
                    // Helper which rejects type arguments on types that take none.
                    let check_zero_args = |et: &mut Self, ty: Type| {
                        if args.is_empty() {
                            ty
                        } else {
                            et.error(&et.to_loc(&n.loc), "expected no type arguments");
                            Type::Error
                        }
                    };
                    // Attempt to resolve as builtin type.
                    match n.value.as_str() {
                        "bool" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::Bool));
                        }
                        "u8" => return check_zero_args(self, Type::new_prim(PrimitiveType::U8)),
                        "u64" => return check_zero_args(self, Type::new_prim(PrimitiveType::U64)),
                        "u128" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::U128));
                        }
                        "num" => return check_zero_args(self, Type::new_prim(PrimitiveType::Num)),
                        "range" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::Range));
                        }
                        "address" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::Address));
                        }
                        "signer" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::Signer));
                        }
                        "type" => {
                            return check_zero_args(self, Type::new_prim(PrimitiveType::TypeValue));
                        }
                        "vector" => {
                            // `vector` is the only builtin which takes exactly one argument.
                            if args.len() != 1 {
                                self.error(
                                    &self.to_loc(&ty.loc),
                                    "expected one type argument for `vector`",
                                );
                                return Type::Error;
                            } else {
                                return Type::Vector(Box::new(self.translate_type(&args[0])));
                            }
                        }
                        _ => {}
                    }
                    // Attempt to resolve as a type parameter.
                    let sym = self.symbol_pool().make(n.value.as_str());
                    if let Some(ty) = self.type_params_table.get(&sym).cloned() {
                        return check_zero_args(self, ty);
                    }
                    // Attempt to resolve as a type value.
                    if let Some(entry) = self.lookup_local(sym, false) {
                        // The local must itself have type `TypeValue` to be usable as a type.
                        let ty = entry.type_.clone();
                        self.check_type(
                            &self.to_loc(&n.loc),
                            &ty,
                            &Type::new_prim(PrimitiveType::TypeValue),
                            "in type",
                        );
                        return check_zero_args(self, Type::TypeLocal(sym));
                    }
                }
                // Fall back to resolving a (possibly qualified) struct type.
                let loc = self.to_loc(&access.loc);
                let sym = self.parent.module_access_to_qualified(access);
                let rty = self.parent.parent.lookup_type(&loc, &sym);
                if !args.is_empty() {
                    // Replace type instantiation.
                    if let Type::Struct(mid, sid, params) = &rty {
                        if params.len() != args.len() {
                            self.error(&loc, "type argument count mismatch");
                            Type::Error
                        } else {
                            Type::Struct(*mid, *sid, self.translate_types(args))
                        }
                    } else {
                        self.error(&loc, "type cannot have type arguments");
                        Type::Error
                    }
                } else {
                    rty
                }
            }
            Ref(is_mut, ty) => Type::Reference(*is_mut, Box::new(self.translate_type(&*ty))),
            Fun(args, result) => Type::Fun(
                self.translate_types(&args),
                Box::new(self.translate_type(&*result)),
            ),
            Unit => Type::Tuple(vec![]),
            Multiple(vst) => Type::Tuple(self.translate_types(vst)),
            UnresolvedError => Type::Error,
        }
    }
    /// Translates a slice of single types.
    pub fn translate_types(&mut self, tys: &[EA::Type]) -> Vec<Type> {
        tys.iter().map(|t| self.translate_type(t)).collect()
    }
    /// Translates an optional slice of single types; `None` yields an empty vector.
    pub fn translate_types_opt(&mut self, tys_opt: &Option<Vec<EA::Type>>) -> Vec<Type> {
        tys_opt
            .as_deref()
            .map(|tys| self.translate_types(tys))
            .unwrap_or_else(Vec::new)
    }
}
/// # Expression Translation
impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'module_translator> {
/// Translates an expression representing a modify target
pub fn translate_modify_target(&mut self, exp: &EA::Exp) -> Exp {
let loc = self.to_loc(&exp.loc);
let (_, exp) = self.translate_exp_free(exp);
match &exp {
Exp::Call(_, Operation::Global(_), _) => exp,
_ => {
self.error(&loc, "global resource access expected");
self.new_error_exp()
}
}
}
    /// Translates an expression, with given expected type, which might be a type variable.
    /// Dispatches on the source AST expression form; every arm checks its result type
    /// against `expected_type`.
    pub fn translate_exp(&mut self, exp: &EA::Exp, expected_type: &Type) -> Exp {
        let loc = self.to_loc(&exp.loc);
        // Helper to check a literal's type and wrap it into a value expression node.
        let make_value = |et: &mut ExpTranslator, val: Value, ty: Type| {
            let rty = et.check_type(&loc, &ty, expected_type, "in expression");
            let id = et.new_node_id_with_type_loc(&rty, &loc);
            Exp::Value(id, val)
        };
        match &exp.value {
            EA::Exp_::Value(v) => {
                if let Some((v, ty)) = self.translate_value(v) {
                    make_value(self, v, ty)
                } else {
                    // Error already reported by `translate_value`.
                    self.new_error_exp()
                }
            }
            EA::Exp_::InferredNum(x) => {
                // We don't really need to infer type, because all ints are exchangeable.
                make_value(
                    self,
                    Value::Number(BigInt::from_u128(*x).unwrap()),
                    Type::new_prim(PrimitiveType::U128),
                )
            }
            EA::Exp_::Name(maccess, type_params) => {
                self.translate_name(&loc, maccess, type_params.as_deref(), expected_type)
            }
            EA::Exp_::Call(maccess, type_params, args) => {
                // Need to make a &[&Exp] out of args.
                let args = args.value.iter().collect_vec();
                self.translate_fun_call(
                    expected_type,
                    &loc,
                    &maccess,
                    type_params.as_deref(),
                    &args,
                )
            }
            EA::Exp_::Pack(maccess, generics, fields) => {
                self.translate_pack(&loc, maccess, generics, fields, expected_type)
            }
            EA::Exp_::IfElse(cond, then, else_) => {
                // Note: branches are translated before the condition; both branches
                // share `expected_type`, the condition must be boolean.
                let then = self.translate_exp(&*then, expected_type);
                let else_ = self.translate_exp(&*else_, expected_type);
                let cond = self.translate_exp(&*cond, &Type::new_prim(PrimitiveType::Bool));
                let id = self.new_node_id_with_type_loc(expected_type, &loc);
                Exp::IfElse(id, Box::new(cond), Box::new(then), Box::new(else_))
            }
            EA::Exp_::Block(seq) => self.translate_seq(&loc, seq, expected_type),
            EA::Exp_::Lambda(bindings, exp) => {
                self.translate_lambda(&loc, bindings, exp, expected_type)
            }
            EA::Exp_::Quant(kind, ranges, triggers, condition, body) => self.translate_quant(
                &loc,
                *kind,
                ranges,
                triggers,
                condition,
                body,
                expected_type,
            ),
            EA::Exp_::BinopExp(l, op, r) => {
                // Binary operators are resolved like calls to their associated
                // (builtin-module) function symbol.
                let args = vec![l.as_ref(), r.as_ref()];
                let QualifiedSymbol {
                    module_name,
                    symbol,
                } = self.parent.parent.bin_op_symbol(&op.value);
                self.translate_call(&loc, &Some(module_name), symbol, None, &args, expected_type)
            }
            EA::Exp_::UnaryExp(op, exp) => {
                // Same treatment as binary operators, with a single argument.
                let args = vec![exp.as_ref()];
                let QualifiedSymbol {
                    module_name,
                    symbol,
                } = self.parent.parent.unary_op_symbol(&op.value);
                self.translate_call(&loc, &Some(module_name), symbol, None, &args, expected_type)
            }
            EA::Exp_::ExpDotted(dotted) => self.translate_dotted(dotted, expected_type),
            EA::Exp_::Index(target, index) => {
                self.translate_index(&loc, target, index, expected_type)
            }
            EA::Exp_::ExpList(exps) => {
                // Expression lists become tuples; element types are collected while
                // translating and then checked against the expected tuple type.
                let mut types = vec![];
                let exps = exps
                    .iter()
                    .map(|exp| {
                        let (ty, exp) = self.translate_exp_free(exp);
                        types.push(ty);
                        exp
                    })
                    .collect_vec();
                let ty = self.check_type(
                    &loc,
                    &Type::Tuple(types),
                    expected_type,
                    "in expression list",
                );
                let id = self.new_node_id_with_type_loc(&ty, &loc);
                Exp::Call(id, Operation::Tuple, exps)
            }
            EA::Exp_::Unit { .. } => {
                // Unit is represented as the empty tuple.
                let ty = self.check_type(
                    &loc,
                    &Type::Tuple(vec![]),
                    expected_type,
                    "in unit expression",
                );
                let id = self.new_node_id_with_type_loc(&ty, &loc);
                Exp::Call(id, Operation::Tuple, vec![])
            }
            EA::Exp_::Assign(..) => {
                self.error(&loc, "assignment only allowed in spec var updates");
                self.new_error_exp()
            }
            EA::Exp_::Dereference(exp) | EA::Exp_::Borrow(_, exp) => {
                // Deref/borrow are transparent when translating a Move function body
                // into a spec function; otherwise they are unsupported in specs.
                if self.translating_fun_as_spec_fun {
                    self.translate_exp(exp, expected_type)
                } else {
                    self.error(&loc, "expression construct not supported in specifications");
                    self.new_error_exp()
                }
            }
            _ => {
                self.error(&loc, "expression construct not supported in specifications");
                self.new_error_exp()
            }
        }
    }
pub fn translate_value(&mut self, v: &EA::Value) -> Option<(Value, Type)> {
let loc = self.to_loc(&v.loc);
match &v.value {
EA::Value_::Address(addr) => {
let addr_str = &format!("{}", addr);
if &addr_str[..2] == "0x" {
let digits_only = &addr_str[2..];
Some((
Value::Address(
BigUint::from_str_radix(digits_only, 16).expect("valid address"),
),
Type::new_prim(PrimitiveType::Address),
))
} else {
self.error(&loc, "address string does not begin with '0x'");
None
}
}
EA::Value_::U8(x) => Some((
Value::Number(BigInt::from_u8(*x).unwrap()),
Type::new_prim(PrimitiveType::U8),
)),
EA::Value_::U64(x) => Some((
Value::Number(BigInt::from_u64(*x).unwrap()),
Type::new_prim(PrimitiveType::U64),
)),
EA::Value_::U128(x) => Some((
Value::Number(BigInt::from_u128(*x).unwrap()),
Type::new_prim(PrimitiveType::U128),
)),
EA::Value_::Bool(x) => Some((Value::Bool(*x), Type::new_prim(PrimitiveType::Bool))),
EA::Value_::Bytearray(x) => {
let ty = Type::Vector(Box::new(Type::new_prim(PrimitiveType::U8)));
Some((Value::ByteArray(x.clone()), ty))
}
}
}
    /// Translates a function-call expression. Resolution order: the `update_field`
    /// builtin, then invocation of a local function value, then a spec-block `let`,
    /// then a global function (with special handling for `assert` and `old`).
    fn translate_fun_call(
        &mut self,
        expected_type: &Type,
        loc: &Loc,
        maccess: &Spanned<EA::ModuleAccess_>,
        generics: Option<&[EA::Type]>,
        args: &[&EA::Exp],
    ) -> Exp {
        // First check for builtin functions.
        if let EA::ModuleAccess_::Name(n) = &maccess.value {
            if n.value == "update_field" {
                return self.translate_update_field(expected_type, loc, generics, args);
            }
        }
        // Next check whether this is an Invoke on a function value.
        if let EA::ModuleAccess_::Name(n) = &maccess.value {
            let sym = self.symbol_pool().make(&n.value);
            if let Some(entry) = self.lookup_local(sym, false) {
                // Check whether the local has the expected function type.
                let sym_ty = entry.type_.clone();
                let (arg_types, args) = self.translate_exp_list(args, false);
                let fun_t = Type::Fun(arg_types, Box::new(expected_type.clone()));
                let sym_ty = self.check_type(&loc, &sym_ty, &fun_t, "in expression");
                let local_id = self.new_node_id_with_type_loc(&sym_ty, &self.to_loc(&n.loc));
                let local_var = Exp::LocalVar(local_id, sym);
                let id = self.new_node_id_with_type_loc(expected_type, &loc);
                return Exp::Invoke(id, Box::new(local_var), args);
            }
            // A simple name bound by a spec-block `let` becomes a call to the
            // function generated for that let.
            if let Some(fid) = self.parent.spec_block_lets.get(&sym).cloned() {
                let (_, args) = self.translate_exp_list(args, false);
                return self.translate_let(loc, generics, args, expected_type, fid);
            }
        }
        // Next treat this as a call to a global function.
        let (module_name, name) = self.parent.module_access_to_parts(maccess);
        // Ignore assert statement.
        if name == self.parent.parent.assert_symbol() {
            return Exp::Call(
                self.new_node_id_with_type_loc(expected_type, &self.to_loc(&maccess.loc)),
                Operation::NoOp,
                vec![],
            );
        }
        // `old(..)` toggles a status flag so nested `old` and misuse outside of a
        // post-state context can be diagnosed.
        let is_old = module_name.is_none() && name == self.parent.parent.old_symbol();
        if is_old {
            match self.old_status {
                OldExpStatus::NotSupported => {
                    self.error(&loc, "`old(..)` expression not allowed in this context");
                }
                OldExpStatus::InsideOld => {
                    self.error(&loc, "`old(..old(..)..)` not allowed");
                }
                OldExpStatus::OutsideOld => {
                    self.old_status = OldExpStatus::InsideOld;
                }
            }
        }
        let result = self.translate_call(&loc, &module_name, name, generics, args, expected_type);
        // Restore the status flag after translating the arguments of `old`.
        if is_old && self.old_status == OldExpStatus::InsideOld {
            self.old_status = OldExpStatus::OutsideOld;
        }
        result
    }
/// Translates an expression without any known type expectation. This creates a fresh type
/// variable and passes this in as expected type, then returns a pair of this type and the
/// translated expression.
pub fn translate_exp_free(&mut self, exp: &EA::Exp) -> (Type, Exp) {
let tvar = self.fresh_type_var();
let exp = self.translate_exp(exp, &tvar);
(self.subs.specialize(&tvar), exp)
}
    /// Translates a sequence expression. In specifications, a sequence is restricted
    /// to `let x = e; ...` bindings followed by a final expression; each binding opens
    /// a fresh scope (allowing shadowing), and all scopes are closed at the end.
    pub fn translate_seq(&mut self, loc: &Loc, seq: &EA::Sequence, expected_type: &Type) -> Exp {
        let n = seq.len();
        if n == 0 {
            self.error(loc, "block sequence cannot be empty");
            return self.new_error_exp();
        }
        // Process all items before the last one, which must be bindings, and accumulate
        // declarations for them.
        let mut decls = vec![];
        let seq = seq.iter().collect_vec();
        for item in &seq[0..seq.len() - 1] {
            match &item.value {
                EA::SequenceItem_::Bind(list, exp) => {
                    let (t, e) = self.translate_exp_free(exp);
                    if list.value.len() != 1 {
                        self.error(
                            &self.to_loc(&list.loc),
                            "[current restriction] tuples not supported in let",
                        );
                        return Exp::Invalid(self.new_node_id());
                    }
                    let bind_loc = self.to_loc(&list.value[0].loc);
                    match &list.value[0].value {
                        EA::LValue_::Var(maccess, _) => {
                            let name = match &maccess.value {
                                EA::ModuleAccess_::Name(n) => n,
                                EA::ModuleAccess_::ModuleAccess(_, n) => n,
                            };
                            // Define the local. Currently we mimic
                            // Rust/ML semantics here, allowing to shadow with each let,
                            // thus entering a new scope.
                            self.enter_scope();
                            let name = self.symbol_pool().make(&name.value);
                            self.define_local(&bind_loc, name, t.clone(), None, None);
                            let id = self.new_node_id_with_type_loc(&t, &bind_loc);
                            decls.push(LocalVarDecl {
                                id,
                                name,
                                binding: Some(e),
                            });
                        }
                        EA::LValue_::Unpack(..) => {
                            self.error(
                                &bind_loc,
                                "[current restriction] unpack not supported in let",
                            );
                            return Exp::Invalid(self.new_node_id());
                        }
                    }
                }
                EA::SequenceItem_::Seq(e) => {
                    // A non-binding item is only tolerated if it translates to a NoOp
                    // (which is how `assert` statements are represented).
                    let translated = self.translate_exp(e, expected_type);
                    match translated {
                        Exp::Call(_, Operation::NoOp, _) => { /* allow assert statement */ }
                        _ => self.error(
                            &self.to_loc(&item.loc),
                            "only binding `let p = e; ...` allowed here",
                        ),
                    }
                }
                _ => self.error(
                    &self.to_loc(&item.loc),
                    "only binding `let p = e; ...` allowed here",
                ),
            }
        }
        // Process the last element, which must be an Exp item.
        let last = match &seq[n - 1].value {
            EA::SequenceItem_::Seq(e) => self.translate_exp(e, expected_type),
            _ => {
                self.error(
                    &self.to_loc(&seq[n - 1].loc),
                    "expected an expression as the last element of the block",
                );
                self.new_error_exp()
            }
        };
        // Exit the scopes for variable bindings; one scope was entered per decl.
        for _ in 0..decls.len() {
            self.exit_scope();
        }
        let id = self.new_node_id_with_type_loc(expected_type, loc);
        Exp::Block(id, decls, Box::new(last))
    }
    /// Translates a name. Reports an error if the name is not found.
    /// Resolution order for simple names: local variable, spec-block `let`,
    /// builtin constant, then a global (constant or spec variable) in this module.
    fn translate_name(
        &mut self,
        loc: &Loc,
        maccess: &EA::ModuleAccess,
        type_args: Option<&[EA::Type]>,
        expected_type: &Type,
    ) -> Exp {
        let global_var_sym = match &maccess.value {
            EA::ModuleAccess_::ModuleAccess(..) => self.parent.module_access_to_qualified(maccess),
            EA::ModuleAccess_::Name(name) => {
                // First try to resolve simple name as local.
                let sym = self.symbol_pool().make(name.value.as_str());
                if let Some(exp) = self.resolve_local(
                    loc,
                    sym,
                    self.old_status == OldExpStatus::InsideOld,
                    expected_type,
                ) {
                    return exp;
                }
                // Next try to resolve simple name as a let.
                if let Some(fid) = self.parent.spec_block_lets.get(&sym).cloned() {
                    return self.translate_let(loc, type_args, vec![], expected_type, fid);
                }
                // If not found, try to resolve as builtin constant.
                let builtin_sym = self.parent.parent.builtin_qualified_symbol(&name.value);
                if let Some(entry) = self.parent.parent.const_table.get(&builtin_sym).cloned() {
                    return self.translate_constant(loc, entry, expected_type);
                }
                // If not found, treat as global var in this module.
                self.parent.qualified_by_module(sym)
            }
        };
        if let Some(entry) = self.parent.parent.const_table.get(&global_var_sym).cloned() {
            return self.translate_constant(loc, entry, expected_type);
        }
        if let Some(entry) = self.parent.parent.spec_var_table.get(&global_var_sym) {
            // A spec variable: verify and apply its type instantiation.
            let type_args = type_args.unwrap_or(&[]);
            if entry.type_params.len() != type_args.len() {
                self.error(
                    loc,
                    &format!(
                        "generic count mismatch (expected {} but found {})",
                        entry.type_params.len(),
                        type_args.len()
                    ),
                );
                return self.new_error_exp();
            }
            let ty = entry.type_.clone();
            let module_id = entry.module_id;
            let var_id = entry.var_id;
            let instantiation = self.translate_types(type_args);
            let ty = ty.instantiate(&instantiation);
            let ty = self.check_type(loc, &ty, expected_type, "in spec var expression");
            let id = self.new_node_id_with_type_loc(&ty, loc);
            // Remember the instantiation as an attribute on the expression node.
            self.set_node_instantiation(id, instantiation);
            return Exp::SpecVar(id, module_id, var_id, None);
        }
        self.error(
            loc,
            &format!(
                "undeclared `{}`",
                global_var_sym.display(self.symbol_pool())
            ),
        );
        self.new_error_exp()
    }
/// Creates an expression for a constant, checking the expected type.
fn translate_constant(&mut self, loc: &Loc, entry: ConstEntry, expected_type: &Type) -> Exp {
let ConstEntry { ty, value, .. } = entry;
let ty = self.check_type(loc, &ty, expected_type, "in const expression");
let id = self.new_node_id_with_type_loc(&ty, loc);
Exp::Value(id, value)
}
fn resolve_local(
&mut self,
loc: &Loc,
mut sym: Symbol,
in_old: bool,
expected_type: &Type,
) -> Option<Exp> {
if let Some(entry) = self.lookup_local(sym, in_old) {
let oper_opt = entry.operation.clone();
let index_opt = entry.temp_index;
let ty = entry.type_.clone();
let ty = self.check_type(loc, &ty, expected_type, "in name expression");
let id = self.new_node_id_with_type_loc(&ty, loc);
if let Some(oper) = oper_opt {
Some(Exp::Call(id, oper, vec![]))
} else if let Some(index) =
index_opt.filter(|_| !self.translating_fun_as_spec_fun && !self.in_let)
{
// Only create a temporary if we are not currently translating a move function as
// a spec function, or a let. In this case, the LocalVarEntry has a bytecode index, but
// we do not want to use this if interpreted as a spec fun.
Some(Exp::Temporary(id, index))
} else {
if self.in_let {
// Mangle the name for context local of let.
sym = self.make_context_local_name(sym, in_old);
}
Some(Exp::LocalVar(id, sym))
}
} else {
None
}
}
    /// Translates a reference to a spec-block `let` into a call of the spec function
    /// generated for it. Context locals captured by the let are resolved in the
    /// current scope and passed as leading arguments, followed by any lambda args.
    fn translate_let(
        &mut self,
        loc: &Loc,
        user_type_args: Option<&[EA::Type]>,
        args: Vec<Exp>,
        expected_type: &Type,
        fid: SpecFunId,
    ) -> Exp {
        let decl = &self.parent.spec_funs[fid.as_usize()].clone();
        let type_args = user_type_args.map(|a| self.translate_types(a));
        let context_type_args = self.get_type_params();
        let (instantiation, diag) =
            self.make_instantiation(decl.type_params.len(), context_type_args, type_args);
        if let Some(msg) = diag {
            self.error(loc, &msg);
            return self.new_error_exp();
        }
        // Create the context args for this let.
        for (name, in_old) in decl
            .context_params
            .as_ref()
            .expect("context_params defined for let function")
        {
            // Find the (possibly old-mangled) parameter matching this context local
            // and resolve the local against its instantiated type.
            let actual_name = self.make_context_local_name(*name, *in_old);
            let (_, ty) = decl
                .params
                .iter()
                .find(|(n, _)| *n == actual_name)
                .expect("context param defined in params");
            let ty = ty.instantiate(&instantiation);
            if let Some(mut arg) = self.resolve_local(loc, *name, *in_old, &ty) {
                if *in_old && !self.in_let {
                    // Context local is accessed in old mode and outside of a let, wrap
                    // expression to get the old value.
                    arg = Exp::Call(arg.node_id(), Operation::Old, vec![arg]);
                }
                all_args.push(arg);
            } else {
                // This should not happen, but lets be robust and report an internal error.
                self.error(
                    loc,
                    &format!(
                        "[internal] error in resolving let context `{}`",
                        self.symbol_pool().string(*name)
                    ),
                );
            }
        }
        // Add additional args for lambda.
        let remaining_args = decl.params.len() - all_args.len();
        if remaining_args != args.len() {
            self.error(
                loc,
                &format!(
                    "expected {}, but got {} arguments for let name",
                    remaining_args,
                    args.len()
                ),
            );
        } else {
            // Type check args and add them.
            let lambda_start = all_args.len();
            for (i, arg) in args.into_iter().enumerate() {
                let node_id = arg.node_id();
                let env = &self.parent.parent.env;
                let loc = env.get_node_loc(node_id);
                let ty = env.get_node_type(node_id);
                let param_ty = &decl.params[lambda_start + i].1;
                self.check_type(&loc, &ty, param_ty, "lambda argument");
                all_args.push(arg);
            }
        }
        // Check the expected type.
        let return_type = decl.result_type.instantiate(&instantiation);
        self.check_type(loc, &return_type, expected_type, "let value");
        // Create the call of the function representing this let.
        let node_id = self.new_node_id_with_type_loc(&return_type, loc);
        self.set_node_instantiation(node_id, instantiation);
        Exp::Call(
            node_id,
            Operation::Function(self.parent.module_id, fid, None),
            all_args,
        )
    }
pub fn make_context_local_name(&self, name: Symbol, in_old: bool) -> Symbol {
if in_old {
self.symbol_pool()
.make(&format!("{}_$old", name.display(self.symbol_pool())))
} else {
name
}
}
    /// Translate an Index expression. Produces a `Slice` operation when the index
    /// has `Range` type, otherwise an `Index` operation on a vector element.
    fn translate_index(
        &mut self,
        loc: &Loc,
        target: &EA::Exp,
        index: &EA::Exp,
        expected_type: &Type,
    ) -> Exp {
        // We must concretize the type of index to decide whether this is a slice
        // or not. This is not compatible with full type inference, so we may
        // try to actually represent slicing explicitly in the syntax to fix this.
        // Alternatively, we could leave it to the backend to figure (after full
        // type inference) whether this is slice or index.
        let elem_ty = self.fresh_type_var();
        let vector_ty = Type::Vector(Box::new(elem_ty.clone()));
        let vector_exp = self.translate_exp(target, &vector_ty);
        let (index_ty, ie) = self.translate_exp_free(index);
        let index_ty = self.subs.specialize(&index_ty);
        let (result_t, oper) = if let Type::Primitive(PrimitiveType::Range) = &index_ty {
            // Slicing yields the same vector type as the target.
            (vector_ty, Operation::Slice)
        } else {
            // If this is not (known to be) a range, assume its an index.
            self.check_type(
                &loc,
                &index_ty,
                &Type::new_prim(PrimitiveType::Num),
                "in index expression",
            );
            (elem_ty, Operation::Index)
        };
        let result_t = self.check_type(loc, &result_t, expected_type, "in index expression");
        let id = self.new_node_id_with_type_loc(&result_t, &loc);
        Exp::Call(id, oper, vec![vector_exp, ie])
    }
    /// Translate a Dotted expression. A dot chain is translated recursively; each
    /// `.field` step becomes a `Select` operation on the resolved struct field.
    fn translate_dotted(&mut self, dotted: &EA::ExpDotted, expected_type: &Type) -> Exp {
        match &dotted.value {
            EA::ExpDotted_::Exp(e) => self.translate_exp(e, expected_type),
            EA::ExpDotted_::Dot(e, n) => {
                let loc = self.to_loc(&dotted.loc);
                // Translate the base with a fresh type var; `lookup_field` will
                // concretize it to find the struct.
                let ty = self.fresh_type_var();
                let exp = self.translate_dotted(e.as_ref(), &ty);
                if let Some((struct_id, field_id, field_ty)) = self.lookup_field(&loc, &ty, n) {
                    self.check_type(&loc, &field_ty, expected_type, "in field selection");
                    let id = self.new_node_id_with_type_loc(&field_ty, &loc);
                    Exp::Call(
                        id,
                        Operation::Select(struct_id.module_id, struct_id.id, field_id),
                        vec![exp],
                    )
                } else {
                    // Error already reported by `lookup_field`.
                    self.new_error_exp()
                }
            }
        }
    }
    /// Translate the builtin function `update_field<generics>(args)`. The first arg must
    /// be a field name, the second the expression to assign the field.
    /// (Concretely: args are `(struct_exp, field_name, new_value)`; the struct type
    /// comes from `expected_type`.)
    fn translate_update_field(
        &mut self,
        expected_type: &Type,
        loc: &Loc,
        generics: Option<&[EA::Type]>,
        args: &[&EA::Exp],
    ) -> Exp {
        if generics.is_some() {
            self.error(loc, "`update_field` cannot have type parameters");
            return self.new_error_exp();
        }
        if args.len() != 3 {
            self.error(loc, "`update_field` requires 3 arguments");
            return self.new_error_exp();
        }
        let struct_exp = self.translate_exp(&args[0], expected_type);
        if let EA::Exp_::Name(
            Spanned {
                value: EA::ModuleAccess_::Name(name),
                ..
            },
            None,
        ) = &args[1].value
        {
            if let Some((struct_id, field_id, field_type)) =
                self.lookup_field(loc, &expected_type, name)
            {
                // Translate the new value with the field type as the expected type.
                let value_exp = self.translate_exp(&args[2], &field_type);
                let id = self.new_node_id_with_type_loc(&expected_type, loc);
                Exp::Call(
                    id,
                    Operation::UpdateField(struct_id.module_id, struct_id.id, field_id),
                    vec![struct_exp, value_exp],
                )
            } else {
                // Error reported
                self.new_error_exp()
            }
        } else {
            self.error(
                loc,
                "second argument of `update_field` must be a field name",
            );
            self.new_error_exp()
        }
    }
    /// Looks up a field in a struct. Returns field information or None after reporting errors.
    fn lookup_field(
        &mut self,
        loc: &Loc,
        struct_ty: &Type,
        name: &Name,
    ) -> Option<(QualifiedId<StructId>, FieldId, Type)> {
        // Similar as with Index, we must concretize the type of the expression on which
        // field selection is performed, violating pure type inference rules, so we can actually
        // check and retrieve the field. To avoid this, we would need to introduce the concept
        // of a type constraint to type unification, where the constraint would be
        // 'type var X where X has field F'. This makes unification significant more complex,
        // so lets see how far we get without this.
        let struct_ty = self.subs.specialize(&struct_ty);
        let field_name = self.symbol_pool().make(&name.value);
        if let Type::Struct(mid, sid, targs) = &struct_ty {
            // Lookup the StructEntry in the build. It must be defined for valid
            // Type::Struct instances.
            let struct_name = self
                .parent
                .parent
                .reverse_struct_table
                .get(&(*mid, *sid))
                .expect("invalid Type::Struct");
            let entry = self
                .parent
                .parent
                .struct_table
                .get(struct_name)
                .expect("invalid Type::Struct");
            // Lookup the field in the struct.
            if let Some(fields) = &entry.fields {
                if let Some((_, field_ty)) = fields.get(&field_name) {
                    // We must instantiate the field type by the provided type args.
                    let field_ty = field_ty.instantiate(targs);
                    Some((
                        entry.module_id.qualified(entry.struct_id),
                        FieldId::new(field_name),
                        field_ty,
                    ))
                } else {
                    self.error(
                        &loc,
                        &format!(
                            "field `{}` not declared in struct `{}`",
                            field_name.display(self.symbol_pool()),
                            struct_name.display(self.symbol_pool())
                        ),
                    );
                    None
                }
            } else {
                // `fields == None` marks a native struct in this table.
                self.error(
                    &loc,
                    &format!(
                        "struct `{}` is native and does not support field selection",
                        struct_name.display(self.symbol_pool())
                    ),
                );
                None
            }
        } else {
            self.error(
                &loc,
                &format!(
                    "type `{}` cannot be resolved as a struct",
                    struct_ty.display(&self.type_display_context()),
                ),
            );
            None
        }
    }
    /// Translates a call, performing overload resolution. Reports an error if the function cannot be found.
    /// This is used to resolve both calls to user spec functions and builtin operators.
    fn translate_call(
        &mut self,
        loc: &Loc,
        module: &Option<ModuleName>,
        name: Symbol,
        generics: Option<&[EA::Type]>,
        args: &[&EA::Exp],
        expected_type: &Type,
    ) -> Exp {
        // Translate generic arguments, if any.
        let generics = generics.as_ref().map(|ts| self.translate_types(&ts));
        // Translate arguments. Skip any lambda expressions; they are resolved after the overload
        // is identified to avoid restrictions with type inference.
        let (arg_types, mut translated_args) = self.translate_exp_list(args, true);
        let args_have_errors = arg_types.iter().any(|t| t == &Type::Error);
        // Lookup candidates.
        let cand_modules = if let Some(m) = module {
            vec![m.clone()]
        } else {
            // For an unqualified name, resolve it both in this and in the builtin pseudo module.
            vec![
                self.parent.module_name.clone(),
                self.parent.parent.builtin_module(),
            ]
        };
        let mut cands = vec![];
        for module_name in cand_modules {
            let full_name = QualifiedSymbol {
                module_name,
                symbol: name,
            };
            if let Some(list) = self.parent.parent.spec_fun_table.get(&full_name) {
                cands.extend_from_slice(list);
            }
        }
        if cands.is_empty() {
            let display = self.display_call_target(module, name);
            self.error(loc, &format!("no function named `{}` found", display));
            return self.new_error_exp();
        }
        // Partition candidates in those which matched and which have been outruled.
        let mut outruled = vec![];
        let mut matching = vec![];
        for cand in &cands {
            // Arity check first.
            if cand.arg_types.len() != translated_args.len() {
                outruled.push((
                    cand,
                    format!(
                        "argument count mismatch (expected {} but found {})",
                        cand.arg_types.len(),
                        translated_args.len()
                    ),
                ));
                continue;
            }
            let (instantiation, diag) =
                self.make_instantiation(cand.type_params.len(), vec![], generics.clone());
            if let Some(msg) = diag {
                outruled.push((cand, msg));
                continue;
            }
            // Clone the current substitution, then unify arguments against parameter types.
            // Unification runs on a clone so a failed candidate leaves `self.subs` untouched.
            let mut subs = self.subs.clone();
            let mut success = true;
            for (i, arg_ty) in arg_types.iter().enumerate() {
                let instantiated = cand.arg_types[i].instantiate(&instantiation);
                if let Err(err) = subs.unify(&self.type_display_context(), arg_ty, &instantiated) {
                    outruled.push((cand, format!("{} for argument {}", err.message, i + 1)));
                    success = false;
                    break;
                }
            }
            if success {
                matching.push((cand, subs, instantiation));
            }
        }
        // Deliver results, reporting errors if there are no or ambiguous matches.
        match matching.len() {
            0 => {
                // Only report error if args had no errors.
                if !args_have_errors {
                    let display = self.display_call_target(module, name);
                    let notes = outruled
                        .iter()
                        .map(|(cand, msg)| {
                            format!(
                                "outruled candidate `{}` ({})",
                                self.display_call_cand(module, name, cand),
                                msg
                            )
                        })
                        .collect_vec();
                    self.parent.parent.env.error_with_notes(
                        loc,
                        &format!("no matching declaration of `{}`", display),
                        notes,
                    );
                }
                self.new_error_exp()
            }
            1 => {
                let (cand, subs, instantiation) = matching.remove(0);
                // Commit the candidate substitution to this expression build.
                self.subs = subs;
                // Now translate lambda-based arguments passing expected type to aid type inference.
                for i in 0..translated_args.len() {
                    let e = args[i];
                    if matches!(e.value, EA::Exp_::Lambda(..)) {
                        let expected_type = self.subs.specialize(&arg_types[i]);
                        translated_args[i] = self.translate_exp(e, &expected_type);
                    }
                }
                // Check result type against expected type.
                let ty = self.check_type(
                    loc,
                    &cand.result_type.instantiate(&instantiation),
                    expected_type,
                    "in expression",
                );
                // Construct result.
                let id = self.new_node_id_with_type_loc(&ty, loc);
                self.set_node_instantiation(id, instantiation);
                if let Operation::Function(module_id, spec_fun_id, None) = cand.oper {
                    if !self.translating_fun_as_spec_fun {
                        // Record the usage of spec function in specs, used later
                        // in spec build.
                        self.parent.parent.add_used_spec_fun(module_id, spec_fun_id);
                    }
                    let module_name = match module {
                        Some(m) => m,
                        _ => &self.parent.module_name,
                    }
                    .clone();
                    let qsym = QualifiedSymbol {
                        module_name,
                        symbol: name,
                    };
                    // If the spec function called is from a Move function,
                    // error if it is not pure.
                    if let Some(entry) = self.parent.parent.fun_table.get(&qsym) {
                        if !entry.is_pure {
                            if self.translating_fun_as_spec_fun {
                                // The Move function is calling another impure Move function,
                                // so it should be considered impure.
                                if module_id.to_usize() < self.parent.module_id.to_usize() {
                                    self.error(loc, "Move function calls impure Move function");
                                    return self.new_error_exp();
                                }
                            } else {
                                let display = self.display_call_target(module, name);
                                let notes = vec![format!(
                                    "impure function `{}`",
                                    self.display_call_cand(module, name, cand),
                                )];
                                self.parent.parent.env.error_with_notes(
                                    loc,
                                    &format!(
                                        "calling impure function `{}` is not allowed",
                                        display
                                    ),
                                    notes,
                                );
                                return self.new_error_exp();
                            }
                        }
                    }
                    self.called_spec_funs.insert((module_id, spec_fun_id));
                }
                Exp::Call(id, cand.oper.clone(), translated_args)
            }
            _ => {
                // Only report error if args had no errors.
                if !args_have_errors {
                    let display = self.display_call_target(module, name);
                    let notes = matching
                        .iter()
                        .map(|(cand, _, _)| {
                            format!(
                                "matching candidate `{}`",
                                self.display_call_cand(module, name, cand)
                            )
                        })
                        .collect_vec();
                    self.parent.parent.env.error_with_notes(
                        loc,
                        &format!("ambiguous application of `{}`", display),
                        notes,
                    );
                }
                self.new_error_exp()
            }
        }
    }
/// Translate a list of expressions and deliver them together with their types.
fn translate_exp_list(
&mut self,
exps: &[&EA::Exp],
skip_lambda: bool,
) -> (Vec<Type>, Vec<Exp>) {
let mut types = vec![];
let exps = exps
.iter()
.map(|e| {
let (t, e) = if !skip_lambda || !matches!(e.value, EA::Exp_::Lambda(..)) {
self.translate_exp_free(e)
} else {
// In skip-lambda mode, just create a fresh type variable. We translate
// the expression in a second pass, once the expected type is known.
(self.fresh_type_var(), Exp::Invalid(NodeId::new(0)))
};
types.push(t);
e
})
.collect_vec();
(types, exps)
}
/// Creates a type instantiation based on provided actual type parameters.
fn make_instantiation(
&mut self,
param_count: usize,
context_args: Vec<Type>,
user_args: Option<Vec<Type>>,
) -> (Vec<Type>, Option<String>) {
let mut args = context_args;
let expected_user_count = param_count - args.len();
if let Some(types) = user_args {
let n = types.len();
args.extend(types.into_iter());
if n != expected_user_count {
(
args,
Some(format!(
"generic count mismatch (expected {} but found {})",
expected_user_count, n,
)),
)
} else {
(args, None)
}
} else {
// Create fresh type variables for user args
for _ in 0..expected_user_count {
args.push(self.fresh_type_var());
}
(args, None)
}
}
    /// Translates a struct pack expression `S<T..> { f_1: e_1, .., f_n: e_n }`.
    ///
    /// Resolves the struct name, builds its type instantiation (reporting a
    /// diagnostic on a generic arity mismatch), translates every provided
    /// field against the instantiated declared field type, and reports errors
    /// for unknown or missing fields. On success, produces an
    /// `Operation::Pack` call whose arguments are ordered by field
    /// declaration index; errors yield `new_error_exp()`.
    fn translate_pack(
        &mut self,
        loc: &Loc,
        maccess: &EA::ModuleAccess,
        generics: &Option<Vec<EA::Type>>,
        fields: &EA::Fields<EA::Exp>,
        expected_type: &Type,
    ) -> Exp {
        let struct_name = self.parent.module_access_to_qualified(maccess);
        let struct_name_loc = self.to_loc(&maccess.loc);
        let generics = generics.as_ref().map(|ts| self.translate_types(&ts));
        if let Some(entry) = self.parent.parent.struct_table.get(&struct_name) {
            let entry = entry.clone();
            // No context type arguments for a pack; all parameters come from
            // the user or are inferred.
            let (instantiation, diag) =
                self.make_instantiation(entry.type_params.len(), vec![], generics);
            if let Some(msg) = diag {
                self.error(loc, &msg);
                return self.new_error_exp();
            }
            if let Some(field_decls) = &entry.fields {
                // Declared fields not yet seen in the pack expression.
                let mut fields_not_covered: BTreeSet<Symbol> = BTreeSet::new();
                fields_not_covered.extend(field_decls.keys());
                // Translated field expressions, keyed by declaration index so
                // they can be emitted in declaration order below.
                let mut args = BTreeMap::new();
                for (name_loc, name_, (_, exp)) in fields.iter() {
                    let field_name = self.symbol_pool().make(&name_);
                    if let Some((idx, field_ty)) = field_decls.get(&field_name) {
                        let exp = self.translate_exp(exp, &field_ty.instantiate(&instantiation));
                        fields_not_covered.remove(&field_name);
                        args.insert(idx, exp);
                    } else {
                        self.error(
                            &self.to_loc(&name_loc),
                            &format!(
                                "field `{}` not declared in struct `{}`",
                                field_name.display(self.symbol_pool()),
                                struct_name.display(self.symbol_pool())
                            ),
                        );
                    }
                }
                if !fields_not_covered.is_empty() {
                    self.error(
                        loc,
                        &format!(
                            "missing fields {}",
                            fields_not_covered
                                .iter()
                                .map(|n| format!("`{}`", n.display(self.symbol_pool())))
                                .join(", ")
                        ),
                    );
                    self.new_error_exp()
                } else {
                    let struct_ty = Type::Struct(entry.module_id, entry.struct_id, instantiation);
                    let struct_ty =
                        self.check_type(loc, &struct_ty, expected_type, "in pack expression");
                    // Emit arguments in field declaration order.
                    let mut args = args
                        .into_iter()
                        .sorted_by_key(|(i, _)| *i)
                        .map(|(_, e)| e)
                        .collect_vec();
                    if args.is_empty() {
                        // The move compiler inserts a dummy field with the value of false
                        // for structs with no fields. This is also what we find in the
                        // Model metadata (i.e. a field `dummy_field`). We simulate this here
                        // for now, though it would be better to remove it everywhere as it
                        // can be confusing to users. However, its currently hard to do this,
                        // because a user could also have defined the `dummy_field`.
                        let id = self.new_node_id_with_type_loc(&BOOL_TYPE, loc);
                        args.push(Exp::Value(id, Value::Bool(false)));
                    }
                    let id = self.new_node_id_with_type_loc(&struct_ty, loc);
                    Exp::Call(id, Operation::Pack(entry.module_id, entry.struct_id), args)
                }
            } else {
                // `entry.fields` is `None` for native structs, which have no
                // representable field layout and therefore cannot be packed.
                self.error(
                    &struct_name_loc,
                    &format!(
                        "native struct `{}` cannot be packed",
                        struct_name.display(self.symbol_pool())
                    ),
                );
                self.new_error_exp()
            }
        } else {
            self.error(
                &struct_name_loc,
                &format!(
                    "undeclared struct `{}`",
                    struct_name.display(self.symbol_pool())
                ),
            );
            self.new_error_exp()
        }
    }
    /// Translates a lambda expression `|x_1, .., x_n| body`.
    ///
    /// Binds each lambda parameter to a fresh type variable in a new local
    /// scope, checks the function type `(t_1, .., t_n) -> t_body` against
    /// `expected_type` before translating the body (so inference can flow from
    /// context into the parameters), then translates the body in that scope.
    /// Tuple/unpack bindings are currently rejected.
    fn translate_lambda(
        &mut self,
        loc: &Loc,
        bindings: &EA::LValueList,
        body: &EA::Exp,
        expected_type: &Type,
    ) -> Exp {
        // Enter the lambda variables into a new local scope and collect their declarations.
        self.enter_scope();
        let mut decls = vec![];
        let mut arg_types = vec![];
        for bind in &bindings.value {
            let loc = self.to_loc(&bind.loc);
            match &bind.value {
                // Only simple name bindings are supported as parameters.
                EA::LValue_::Var(
                    Spanned {
                        value: EA::ModuleAccess_::Name(n),
                        ..
                    },
                    _,
                ) => {
                    let name = self.symbol_pool().make(&n.value);
                    // The parameter type is open; unification with the
                    // expected function type below will constrain it.
                    let ty = self.fresh_type_var();
                    let id = self.new_node_id_with_type_loc(&ty, &loc);
                    self.define_local(&loc, name, ty.clone(), None, None);
                    arg_types.push(ty);
                    decls.push(LocalVarDecl {
                        id,
                        name,
                        binding: None,
                    });
                }
                EA::LValue_::Unpack(..) | EA::LValue_::Var(..) => {
                    self.error(&loc, "[current restriction] tuples not supported in lambda")
                }
            }
        }
        // Create a fresh type variable for the body and check expected type before analyzing
        // body. This aids type inference for the lambda parameters.
        let ty = self.fresh_type_var();
        let rty = self.check_type(
            loc,
            &Type::Fun(arg_types, Box::new(ty.clone())),
            expected_type,
            "in lambda",
        );
        let rbody = self.translate_exp(body, &ty);
        self.exit_scope();
        let id = self.new_node_id_with_type_loc(&rty, loc);
        Exp::Lambda(id, decls, Box::new(rbody))
    }
    /// Translates a quantifier expression
    /// `forall/exists x in range, .. : condition : body` (with optional
    /// triggers).
    ///
    /// Each quantified variable's range must be a vector, a type domain, or a
    /// number range; the variable is bound in a fresh local scope in which the
    /// triggers, condition, and body are translated. The result type is
    /// checked to be `bool` against `expected_type`.
    fn translate_quant(
        &mut self,
        loc: &Loc,
        kind: PA::QuantKind,
        ranges: &EA::LValueWithRangeList,
        triggers: &[Vec<EA::Exp>],
        condition: &Option<Box<EA::Exp>>,
        body: &EA::Exp,
        expected_type: &Type,
    ) -> Exp {
        // Enter the quantifier variables into a new local scope and collect their declarations.
        self.enter_scope();
        let mut rranges = vec![];
        for range in &ranges.value {
            // The quantified variable and its domain expression.
            let (bind, exp) = &range.value;
            let loc = self.to_loc(&bind.loc);
            let (exp_ty, rexp) = self.translate_exp_free(exp);
            // `ty` is the element type of the domain, constrained below
            // according to the shape of the domain expression.
            let ty = self.fresh_type_var();
            let exp_ty = self.subs.specialize(&exp_ty);
            match &exp_ty {
                Type::Vector(..) => {
                    self.check_type(
                        &loc,
                        &exp_ty,
                        &Type::Vector(Box::new(ty.clone())),
                        "in quantification over vector",
                    );
                }
                Type::TypeDomain(..) => {
                    self.check_type(
                        &loc,
                        &exp_ty,
                        &Type::TypeDomain(Box::new(ty.clone())),
                        "in quantification over domain",
                    );
                }
                Type::Primitive(PrimitiveType::Range) => {
                    // Ranges always yield numbers, so constrain the variable
                    // type directly.
                    self.check_type(
                        &loc,
                        &ty,
                        &Type::Primitive(PrimitiveType::Num),
                        "in quantification over range",
                    );
                }
                _ => {
                    self.error(&loc, "quantified variables must range over a vector, a type domain, or a number range");
                    return self.new_error_exp();
                }
            }
            match &bind.value {
                // Only simple name bindings are supported as quantified
                // variables.
                EA::LValue_::Var(
                    Spanned {
                        value: EA::ModuleAccess_::Name(n),
                        ..
                    },
                    _,
                ) => {
                    let name = self.symbol_pool().make(&n.value);
                    let id = self.new_node_id_with_type_loc(&ty, &loc);
                    self.define_local(&loc, name, ty.clone(), None, None);
                    let rbind = LocalVarDecl {
                        id,
                        name,
                        binding: None,
                    };
                    rranges.push((rbind, rexp));
                }
                EA::LValue_::Unpack(..) | EA::LValue_::Var(..) => self.error(
                    &loc,
                    "[current restriction] tuples not supported in quantifiers",
                ),
            }
        }
        // A quantifier expression is always boolean.
        let rty = self.check_type(
            loc,
            &Type::new_prim(PrimitiveType::Bool),
            expected_type,
            "in quantified expression",
        );
        let rtriggers = triggers
            .iter()
            .map(|trigger| {
                trigger
                    .iter()
                    .map(|e| self.translate_exp_free(e).1)
                    .collect()
            })
            .collect();
        let rbody = self.translate_exp(body, &rty);
        let rcondition = condition
            .as_ref()
            .map(|cond| Box::new(self.translate_exp(cond, &rty)));
        self.exit_scope();
        let id = self.new_node_id_with_type_loc(&rty, loc);
        let rkind = match kind.value {
            PA::QuantKind_::Forall => QuantKind::Forall,
            PA::QuantKind_::Exists => QuantKind::Exists,
        };
        Exp::Quant(id, rkind, rranges, rtriggers, rcondition, Box::new(rbody))
    }
pub fn check_type(&mut self, loc: &Loc, ty: &Type, expected: &Type, context_msg: &str) -> Type {
// Because of Rust borrow semantics, we must temporarily detach the substitution from
// the build. This is because we also need to inherently borrow self via the
// type_display_context which is passed into unification.
let mut subs = std::mem::replace(&mut self.subs, Substitution::new());
let result = match subs.unify(&self.type_display_context(), ty, expected) {
Ok(t) => t,
Err(err) => {
self.error(&loc, &format!("{} {}", err.message, context_msg));
Type::Error
}
};
self.subs = subs;
result
}
    /// Converts a bytecode-level `MoveValue` constant into a model `Value`.
    ///
    /// Integers become `Value::Number`, booleans `Value::Bool`, addresses and
    /// signers `Value::Address`. Vectors are only supported when their
    /// elements are `U8` (yielding `Value::ByteArray`); any other element or
    /// value kind reports an error at `loc` and falls back to a placeholder.
    pub fn translate_from_move_value(&self, loc: &Loc, value: &MoveValue) -> Value {
        match value {
            MoveValue::U8(n) => Value::Number(BigInt::from_u8(*n).unwrap()),
            MoveValue::U64(n) => Value::Number(BigInt::from_u64(*n).unwrap()),
            MoveValue::U128(n) => Value::Number(BigInt::from_u128(*n).unwrap()),
            MoveValue::Bool(b) => Value::Bool(*b),
            MoveValue::Address(a) => Value::Address(ModuleEnv::addr_to_big_uint(a)),
            MoveValue::Signer(a) => Value::Address(ModuleEnv::addr_to_big_uint(a)),
            MoveValue::Vector(vs) => {
                // Only byte vectors are supported; non-U8 elements report an
                // error and are dropped from the result.
                let b = vs
                    .iter()
                    .filter_map(|v| match v {
                        MoveValue::U8(n) => Some(*n),
                        _ => {
                            self.error(
                                loc,
                                &format!("Not yet supported constant vector value: {:?}", v),
                            );
                            None
                        }
                    })
                    .collect::<Vec<u8>>();
                Value::ByteArray(b)
            }
            _ => {
                // Placeholder result so translation can continue after the
                // reported error.
                self.error(
                    loc,
                    &format!("Not yet supported constant value: {:?}", value),
                );
                Value::Bool(false)
            }
        }
    }
}
| 41.035025 | 120 | 0.487586 |
4ba411db37b25a48fe5beba0f5623e5dda77d421 | 901 | use std::collections::HashMap;
use std::io;
/// Reads `Add <name> to <department>` commands from stdin until `quit`,
/// then prints each department's employees in sorted order.
fn main() {
    let mut map: HashMap<String, Vec<String>> = HashMap::new();
    loop {
        // Renamed from `str` (which shadows the primitive type name).
        let mut line = String::new();
        io::stdin().read_line(&mut line).expect("read failed");
        if line.trim() == "quit" {
            break;
        }
        add_employee(line.trim(), &mut map);
    }
    // Sort each department's roster. The key is not needed, so iterate the
    // values directly instead of binding an unused `key` (compiler warning).
    for value in map.values_mut() {
        value.sort_unstable();
    }
    println!("{:?}", map);
}
/// Parses an `Add <name> to <department>` command and records the employee
/// under that department's roster, creating the roster on first use.
fn add_employee(str: &str, map: &mut HashMap<String, Vec<String>>) {
    let (employee, department) = process_command(str);
    // `or_default()` avoids constructing a throwaway Vec when the key
    // already exists (unlike `or_insert(Vec::new())`, whose argument is
    // always evaluated).
    map.entry(department).or_default().push(employee);
}
/// Splits an `Add <name> to <department>` command into its name and
/// department parts.
///
/// # Panics
/// Panics when the input does not have exactly the shape
/// `Add <name> to <department>`.
fn process_command(str: &str) -> (String, String) {
    let tokens: Vec<&str> = str.split_whitespace().collect();
    // A slice pattern validates the whole command shape at once and yields a
    // descriptive message, instead of three piecemeal `assert_eq!` failures.
    match tokens.as_slice() {
        ["Add", name, "to", department] => (name.to_string(), department.to_string()),
        _ => panic!("expected `Add <name> to <department>`, got {:?}", str),
    }
}
| 23.710526 | 68 | 0.560488 |
62eb7582a2f70f0b093dfabcd628ee555c472edc | 347 | use std::alloc::{alloc, dealloc, realloc, Layout};
// error-pattern: dereferenced after this allocation got freed
fn main() {
    // This is a Miri UI test: it intentionally triggers undefined behavior so
    // Miri can detect it (the `error-pattern` comment above states the
    // expected diagnostic). Do not "fix" the use-after-free below.
    unsafe {
        let x = alloc(Layout::from_size_align_unchecked(1, 1));
        dealloc(x, Layout::from_size_align_unchecked(1, 1));
        // `x` is dangling here; passing it to `realloc` is the UB under test.
        let _z = realloc(x, Layout::from_size_align_unchecked(1, 1), 1);
    }
}
| 28.916667 | 72 | 0.659942 |
f755838b8692fc1ff4a4ed605fe49c5f7d1bebb6 | 2,360 | use super::super::CommandBorrowed;
use crate::{
client::Client,
error::Error,
request::{Request, RequestBuilder},
response::ResponseFuture,
routing::Route,
};
use twilight_model::{
application::command::{Command, CommandType},
id::{ApplicationId, GuildId},
};
/// Create a message command in a guild.
///
/// Creating a guild command with the same name as an already-existing guild
/// command in the same guild will overwrite the old command. See [the discord
/// docs] for more information.
///
/// [the discord docs]: https://discord.com/developers/docs/interactions/application-commands#create-guild-application-command
#[must_use = "requests must be configured and executed"]
pub struct CreateGuildMessageCommand<'a> {
    // Application the command belongs to.
    application_id: ApplicationId,
    // Whether the command is enabled by default; `None` leaves it unset.
    default_permission: Option<bool>,
    // Guild the command is created in.
    guild_id: GuildId,
    http: &'a Client,
    // Name of the message command.
    name: &'a str,
}
impl<'a> CreateGuildMessageCommand<'a> {
pub(crate) const fn new(
http: &'a Client,
application_id: ApplicationId,
guild_id: GuildId,
name: &'a str,
) -> Self {
Self {
application_id,
default_permission: None,
guild_id,
http,
name,
}
}
/// Whether the command is enabled by default when the app is added to a
/// guild.
pub const fn default_permission(mut self, default: bool) -> Self {
self.default_permission = Some(default);
self
}
fn request(&self) -> Result<Request, Error> {
Request::builder(&Route::CreateGuildCommand {
application_id: self.application_id.0,
guild_id: self.guild_id.0,
})
.json(&CommandBorrowed {
application_id: Some(self.application_id),
default_permission: self.default_permission,
description: None,
kind: CommandType::Message,
name: self.name,
options: None,
})
.map(RequestBuilder::build)
}
/// Execute the request, returning a future resolving to a [`Response`].
///
/// [`Response`]: crate::response::Response
pub fn exec(self) -> ResponseFuture<Command> {
match self.request() {
Ok(request) => self.http.request(request),
Err(source) => ResponseFuture::error(source),
}
}
}
| 29.5 | 126 | 0.613983 |
0a29701a8fef36b017579d5d1d937cc96d545649 | 26,723 | // Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.
use std::{
fs,
fs::File,
io,
io::prelude::*,
path::PathBuf,
sync::{atomic::Ordering, mpsc, Arc, Mutex},
thread,
time::Duration,
};
use engine_traits::RaftEngineReadOnly;
use kvproto::raft_serverpb::RaftMessage;
use raft::eraftpb::MessageType;
use test_raftstore::*;
use tikv_util::{config::*, time::Instant, HandyRwLock};
// A snapshot generated before a split covers the whole pre-split range; once
// the split region's snapshot is applied, the stale overlapping snapshot
// should be cleaned up.
#[test]
fn test_overlap_cleanup() {
    let mut cluster = new_node_cluster(0, 3);
    // Disable raft log gc in this test case.
    cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::secs(60);
    let gen_snapshot_fp = "region_gen_snap";
    let pd_client = Arc::clone(&cluster.pd_client);
    // Disable default max peer count check.
    pd_client.disable_default_operator();
    let region_id = cluster.run_conf_change();
    pd_client.must_add_peer(region_id, new_peer(2, 2));
    cluster.must_put(b"k1", b"v1");
    must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
    cluster.must_transfer_leader(region_id, new_peer(2, 2));
    // This will only pause the bootstrapped region, so the split region
    // can still work as expected.
    fail::cfg(gen_snapshot_fp, "pause").unwrap();
    pd_client.must_add_peer(region_id, new_peer(3, 3));
    cluster.must_put(b"k3", b"v3");
    assert_snapshot(&cluster.get_snap_dir(2), region_id, true);
    let region1 = cluster.get_region(b"k1");
    cluster.must_split(&region1, b"k2");
    // Wait till the snapshot of split region is applied, whose range is ["", "k2").
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
    // Resume the fail point and pause it again. So only the paused snapshot is generated.
    // And the paused snapshot's range is ["", ""), hence overlap.
    fail::cfg(gen_snapshot_fp, "pause").unwrap();
    // Overlap snapshot should be deleted.
    assert_snapshot(&cluster.get_snap_dir(3), region_id, false);
    fail::remove(gen_snapshot_fp);
}
// When resolving remote address, all messages will be dropped and
// report unreachable. However unreachable won't reset follower's
// progress if it's in Snapshot state. So trying to send a snapshot
// when the address is being resolved will leave follower's progress
// stay in Snapshot forever.
#[test]
fn test_server_snapshot_on_resolve_failure() {
    let mut cluster = new_server_cluster(1, 2);
    configure_for_snapshot(&mut cluster);
    let on_send_store_fp = "transport_on_send_snapshot";
    let pd_client = Arc::clone(&cluster.pd_client);
    // Disable default max peer count check.
    pd_client.disable_default_operator();
    cluster.run_conf_change();
    cluster.must_put(b"k1", b"v1");
    // Notifies when a MsgSnapshot is (attempted to be) sent by store 1.
    let ready_notify = Arc::default();
    let (notify_tx, notify_rx) = mpsc::channel();
    cluster.sim.write().unwrap().add_send_filter(
        1,
        Box::new(MessageTypeNotifier::new(
            MessageType::MsgSnapshot,
            notify_tx,
            Arc::clone(&ready_notify),
        )),
    );
    // "return(2)" those failure occurs if TiKV resolves or sends to store 2.
    fail::cfg(on_send_store_fp, "return(2)").unwrap();
    pd_client.add_peer(1, new_learner_peer(2, 2));
    // We are ready to recv notify.
    ready_notify.store(true, Ordering::SeqCst);
    notify_rx.recv_timeout(Duration::from_secs(3)).unwrap();
    // The first send failed, so the learner must not have the data yet.
    let engine2 = cluster.get_engine(2);
    must_get_none(&engine2, b"k1");
    // If snapshot status is reported correctly, sending snapshot should be retried.
    notify_rx.recv_timeout(Duration::from_secs(3)).unwrap();
}
// Exercises snapshot generation/sending under failpoints: one snapshot is
// held after sending (store 4), and generation is paused while store 5
// catches up, verifying cleanup and eventual delivery.
#[test]
fn test_generate_snapshot() {
    let mut cluster = new_server_cluster(1, 5);
    cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
    cluster.cfg.raft_store.raft_log_gc_count_limit = Some(8);
    cluster.cfg.raft_store.merge_max_log_gap = 3;
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    cluster.run();
    cluster.must_transfer_leader(1, new_peer(1, 1));
    cluster.stop_node(4);
    cluster.stop_node(5);
    // Write enough entries so the logs needed by stores 4/5 get compacted,
    // forcing snapshots when they rejoin.
    (0..10).for_each(|_| cluster.must_put(b"k2", b"v2"));
    // Sleep for a while to ensure all logs are compacted.
    thread::sleep(Duration::from_millis(100));
    fail::cfg("snapshot_delete_after_send", "pause").unwrap();
    // Let store 4 inform leader to generate a snapshot.
    cluster.run_node(4).unwrap();
    must_get_equal(&cluster.get_engine(4), b"k2", b"v2");
    fail::cfg("snapshot_enter_do_build", "pause").unwrap();
    cluster.run_node(5).unwrap();
    thread::sleep(Duration::from_millis(100));
    fail::cfg("snapshot_delete_after_send", "off").unwrap();
    must_empty_dir(cluster.get_snap_dir(1));
    // The task is droped so that we can't get the snapshot on store 5.
    fail::cfg("snapshot_enter_do_build", "pause").unwrap();
    must_get_none(&cluster.get_engine(5), b"k2");
    fail::cfg("snapshot_enter_do_build", "off").unwrap();
    must_get_equal(&cluster.get_engine(5), b"k2", b"v2");
    fail::remove("snapshot_enter_do_build");
    fail::remove("snapshot_delete_after_send");
}
/// Polls `path` for up to ~5 seconds (500 * 10ms) until the directory is
/// empty, panicking with the leftover entries if it never drains.
fn must_empty_dir(path: String) {
    for _ in 0..500 {
        // Check before sleeping: the original slept 10ms even when the
        // directory was already empty on entry.
        if fs::read_dir(&path).unwrap().count() == 0 {
            return;
        }
        thread::sleep(Duration::from_millis(10));
    }
    // Timed out: collect the remaining entries for the panic message. The
    // directory may still have drained during the final sleep, hence the
    // emptiness re-check.
    let entries = fs::read_dir(&path)
        .and_then(|dir| dir.collect::<io::Result<Vec<_>>>())
        .unwrap();
    if !entries.is_empty() {
        panic!(
            "the directory {:?} should be empty, but has entries: {:?}",
            path, entries
        );
    }
}
/// When `exist` is true, waits up to ~6s for a snapshot file of `region_id`
/// to appear under `snap_dir`, panicking on timeout.
///
/// NOTE(review): when `exist` is false this performs a single directory scan
/// and returns without asserting absence — presumably relying on deletion
/// being checked elsewhere/asynchronously; worth confirming the intent.
fn assert_snapshot(snap_dir: &str, region_id: u64, exist: bool) {
    let region_id = format!("{}", region_id);
    let timer = Instant::now();
    loop {
        for p in fs::read_dir(&snap_dir).unwrap() {
            let name = p.unwrap().file_name().into_string().unwrap();
            // Snapshot file names look like `<kind>_<region_id>_...`; skip
            // the kind token and compare the region id token.
            let mut parts = name.split('_');
            parts.next();
            if parts.next().unwrap() == region_id && exist {
                return;
            }
        }
        if !exist {
            return;
        }
        if timer.saturating_elapsed() < Duration::from_secs(6) {
            thread::sleep(Duration::from_millis(20));
        } else {
            panic!(
                "assert snapshot [exist: {}, region: {}] fail",
                exist, region_id
            );
        }
    }
}
// A peer on store 3 is isolated and is applying snapshot. (add failpoint so it's always pending)
// Then two conf change happens, this peer is removed and a new peer is added on store 3.
// Then isolation clear, this peer will be destroyed because of a bigger peer id in msg.
// In previous implementation, peer fsm can be destroyed synchronously because snapshot state is
// pending and can be canceled, but panic may happen if the applyfsm runs very slow.
#[test]
fn test_destroy_peer_on_pending_snapshot() {
    let mut cluster = new_server_cluster(0, 3);
    configure_for_snapshot(&mut cluster);
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    // Guard: raftstore threads must never perform engine seeks directly.
    fail::cfg_callback("engine_rocks_raft_engine_clean_seek", move || {
        if std::thread::current().name().unwrap().contains("raftstore") {
            panic!("seek should not happen in raftstore threads");
        }
    })
    .unwrap();
    let r1 = cluster.run_conf_change();
    pd_client.must_add_peer(r1, new_peer(2, 2));
    pd_client.must_add_peer(r1, new_peer(3, 3));
    cluster.must_put(b"k1", b"v1");
    // Ensure peer 3 is initialized.
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
    cluster.must_transfer_leader(1, new_peer(1, 1));
    // Isolate peer 3 and write enough that it will need a snapshot later.
    cluster.add_send_filter(IsolationFilterFactory::new(3));
    for i in 0..20 {
        cluster.must_put(format!("k1{}", i).as_bytes(), b"v1");
    }
    let apply_snapshot_fp = "apply_pending_snapshot";
    fail::cfg(apply_snapshot_fp, "return()").unwrap();
    cluster.clear_send_filters();
    // Wait for leader send snapshot.
    sleep_ms(100);
    cluster.add_send_filter(IsolationFilterFactory::new(3));
    // Don't send check stale msg to PD
    let peer_check_stale_state_fp = "peer_check_stale_state";
    fail::cfg(peer_check_stale_state_fp, "return()").unwrap();
    pd_client.must_remove_peer(r1, new_peer(3, 3));
    pd_client.must_add_peer(r1, new_peer(3, 4));
    let before_handle_normal_3_fp = "before_handle_normal_3";
    fail::cfg(before_handle_normal_3_fp, "pause").unwrap();
    cluster.clear_send_filters();
    // Wait for leader send msg to peer 3.
    // Then destroy peer 3 and create peer 4.
    sleep_ms(100);
    fail::remove(apply_snapshot_fp);
    fail::remove(before_handle_normal_3_fp);
    cluster.must_put(b"k120", b"v1");
    // After peer 4 has applied snapshot, data should be got.
    must_get_equal(&cluster.get_engine(3), b"k120", b"v1");
}
// The peer 3 in store 3 is isolated for a while and then recovered.
// During its applying snapshot, however the peer is destroyed and thus applying snapshot is canceled.
// And when it's destroyed (destroy is not finished either), the machine restarted.
// After the restart, the snapshot should be applied successfully.println!
// And new data should be written to store 3 successfully.
#[test]
fn test_destroy_peer_on_pending_snapshot_and_restart() {
    let mut cluster = new_server_cluster(0, 3);
    configure_for_snapshot(&mut cluster);
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    let r1 = cluster.run_conf_change();
    pd_client.must_add_peer(r1, new_peer(2, 2));
    pd_client.must_add_peer(r1, new_peer(3, 3));
    cluster.must_put(b"k1", b"v1");
    // Ensure peer 3 is initialized.
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
    cluster.must_transfer_leader(1, new_peer(1, 1));
    // Keep the peer in the "destroy pending" state so the restart happens
    // mid-destroy.
    let destroy_peer_fp = "destroy_peer_after_pending_move";
    fail::cfg(destroy_peer_fp, "return(true)").unwrap();
    cluster.add_send_filter(IsolationFilterFactory::new(3));
    for i in 0..20 {
        cluster.must_put(format!("k1{}", i).as_bytes(), b"v1");
    }
    // skip applying snapshot into RocksDB to keep peer status is Applying
    let apply_snapshot_fp = "apply_pending_snapshot";
    fail::cfg(apply_snapshot_fp, "return()").unwrap();
    cluster.clear_send_filters();
    // Wait for leader send snapshot.
    sleep_ms(100);
    // Don't send check stale msg to PD
    let peer_check_stale_state_fp = "peer_check_stale_state";
    fail::cfg(peer_check_stale_state_fp, "return()").unwrap();
    pd_client.must_remove_peer(r1, new_peer(3, 3));
    // Without it, pd_client.must_remove_peer does not trigger destroy_peer!
    pd_client.must_add_peer(r1, new_peer(3, 4));
    let before_handle_normal_3_fp = "before_handle_normal_3";
    // to pause ApplyTaskRes::Destroy so that peer gc could finish
    fail::cfg(before_handle_normal_3_fp, "pause").unwrap();
    // Wait for leader send msg to peer 3.
    // Then destroy peer 3
    sleep_ms(100);
    fail::remove(before_handle_normal_3_fp); // allow destroy run
    // restart node 3
    cluster.stop_node(3);
    fail::remove(apply_snapshot_fp);
    fail::remove(peer_check_stale_state_fp);
    fail::remove(destroy_peer_fp);
    cluster.run_node(3).unwrap();
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
    // After peer 3 has applied snapshot, data should be got.
    must_get_equal(&cluster.get_engine(3), b"k119", b"v1");
    // In the end the snapshot file should be gc-ed anyway, either by new peer or by store
    let now = Instant::now();
    loop {
        let mut snap_files = vec![];
        let snap_dir = cluster.get_snap_dir(3);
        // snapfiles should be gc.
        snap_files.extend(fs::read_dir(snap_dir).unwrap().map(|p| p.unwrap().path()));
        if snap_files.is_empty() {
            break;
        }
        if now.saturating_elapsed() > Duration::from_secs(5) {
            panic!("snap files are not gc-ed");
        }
        sleep_ms(20);
    }
    cluster.must_put(b"k120", b"v1");
    // new data should be replicated to peer 4 in store 3
    must_get_equal(&cluster.get_engine(3), b"k120", b"v1");
}
// Shutting down a node while snapshot GC is in flight must not delete a
// snapshot that has been received but not applied.
#[test]
fn test_shutdown_when_snap_gc() {
    let mut cluster = new_node_cluster(0, 2);
    // So that batch system can handle a snap_gc event before shutting down.
    cluster.cfg.raft_store.store_batch_system.max_batch_size = Some(1);
    cluster.cfg.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(20);
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    let r1 = cluster.run_conf_change();
    // Only save a snapshot on peer 2, but do not apply it really.
    fail::cfg("skip_schedule_applying_snapshot", "return").unwrap();
    pd_client.must_add_peer(r1, new_learner_peer(2, 2));
    // Snapshot directory on store 2 shouldn't be empty.
    let snap_dir = cluster.get_snap_dir(2);
    for i in 0..=100 {
        if i == 100 {
            panic!("store 2 snap dir must not be empty");
        }
        let dir = fs::read_dir(&snap_dir).unwrap();
        if dir.count() > 0 {
            break;
        }
        sleep_ms(10);
    }
    // Delay the GC handling so it races with the shutdown below.
    fail::cfg("peer_2_handle_snap_mgr_gc", "pause").unwrap();
    std::thread::spawn(|| {
        // Sleep a while to wait snap_gc event to reach batch system.
        sleep_ms(500);
        fail::cfg("peer_2_handle_snap_mgr_gc", "off").unwrap();
    });
    sleep_ms(100);
    cluster.stop_node(2);
    // The unapplied snapshot must survive the shutdown.
    let snap_dir = cluster.get_snap_dir(2);
    let dir = fs::read_dir(&snap_dir).unwrap();
    if dir.count() == 0 {
        panic!("store 2 snap dir must not be empty");
    }
}
// Test if a peer handle the old snapshot properly.
#[test]
fn test_receive_old_snapshot() {
    let mut cluster = new_node_cluster(0, 3);
    configure_for_snapshot(&mut cluster);
    cluster.cfg.raft_store.right_derive_when_split = true;
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    let r1 = cluster.run_conf_change();
    // Bypass the snapshot gc because the snapshot may be used twice.
    let peer_2_handle_snap_mgr_gc_fp = "peer_2_handle_snap_mgr_gc";
    fail::cfg(peer_2_handle_snap_mgr_gc_fp, "return()").unwrap();
    pd_client.must_add_peer(r1, new_peer(2, 2));
    pd_client.must_add_peer(r1, new_peer(3, 3));
    cluster.must_transfer_leader(r1, new_peer(1, 1));
    cluster.must_put(b"k00", b"v1");
    // Ensure peer 2 is initialized.
    must_get_equal(&cluster.get_engine(2), b"k00", b"v1");
    // Isolate peer 2 so that it will need a snapshot to catch up.
    cluster.add_send_filter(IsolationFilterFactory::new(2));
    for i in 0..20 {
        cluster.must_put(format!("k{}", i).as_bytes(), b"v1");
    }
    // Capture (and drop) the snapshot messages destined for peer 2 so they
    // can be replayed later as "old" snapshots.
    let dropped_msgs = Arc::new(Mutex::new(Vec::new()));
    let recv_filter = Box::new(
        RegionPacketFilter::new(r1, 2)
            .direction(Direction::Recv)
            .msg_type(MessageType::MsgSnapshot)
            .reserve_dropped(Arc::clone(&dropped_msgs)),
    );
    cluster.sim.wl().add_recv_filter(2, recv_filter);
    cluster.clear_send_filters();
    for _ in 0..20 {
        let guard = dropped_msgs.lock().unwrap();
        if !guard.is_empty() {
            break;
        }
        drop(guard);
        sleep_ms(10);
    }
    let msgs: Vec<_> = {
        let mut guard = dropped_msgs.lock().unwrap();
        if guard.is_empty() {
            drop(guard);
            panic!("do not receive snapshot msg in 200ms");
        }
        std::mem::take(guard.as_mut())
    };
    cluster.sim.wl().clear_recv_filters(2);
    for i in 20..40 {
        cluster.must_put(format!("k{}", i).as_bytes(), b"v1");
    }
    must_get_equal(&cluster.get_engine(2), b"k39", b"v1");
    let router = cluster.sim.wl().get_router(2).unwrap();
    // Send the old snapshot
    for raft_msg in msgs {
        router.send_raft_message(raft_msg).unwrap();
    }
    cluster.must_put(b"k40", b"v1");
    must_get_equal(&cluster.get_engine(2), b"k40", b"v1");
    pd_client.must_remove_peer(r1, new_peer(2, 2));
    must_get_none(&cluster.get_engine(2), b"k40");
    let region = cluster.get_region(b"k1");
    cluster.must_split(&region, b"k5");
    let left = cluster.get_region(b"k1");
    pd_client.must_add_peer(left.get_id(), new_peer(2, 4));
    cluster.must_put(b"k11", b"v1");
    // If peer 2 handles previous old snapshot properly and does not leave over metadata
    // in `pending_snapshot_regions`, peer 4 should be created normally.
    must_get_equal(&cluster.get_engine(2), b"k11", b"v1");
    fail::remove(peer_2_handle_snap_mgr_gc_fp);
}
/// Test if snapshot can be genereated when there is a ready with no newly
/// committed entries.
/// The failpoint `before_no_ready_gen_snap_task` is used for skipping
/// the code path that snapshot is generated when there is no ready.
#[test]
fn test_gen_snapshot_with_no_committed_entries_ready() {
    let mut cluster = new_node_cluster(0, 3);
    configure_for_snapshot(&mut cluster);
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    // Hold back log GC until writes have accumulated, then release it so
    // compaction happens while peer 3 is isolated.
    let on_raft_gc_log_tick_fp = "on_raft_gc_log_tick";
    fail::cfg(on_raft_gc_log_tick_fp, "return()").unwrap();
    // Skip the no-ready snapshot generation path so only the heartbeat-driven
    // path remains under test.
    let before_no_ready_gen_snap_task_fp = "before_no_ready_gen_snap_task";
    fail::cfg(before_no_ready_gen_snap_task_fp, "return()").unwrap();
    cluster.run();
    cluster.add_send_filter(IsolationFilterFactory::new(3));
    for i in 1..10 {
        cluster.must_put(format!("k{}", i).as_bytes(), b"v1");
    }
    fail::remove(on_raft_gc_log_tick_fp);
    sleep_ms(100);
    cluster.clear_send_filters();
    // Snapshot should be generated and sent after leader 1 receives the heartbeat
    // response from peer 3.
    must_get_equal(&cluster.get_engine(3), b"k9", b"v1");
}
// Test snapshot generating can be canceled by Raft log GC correctly. It does
// 1. pause snapshot generating with a failpoint, and then add a new peer;
// 2. append more Raft logs to the region to trigger raft log compactions;
// 3. disable the failpoint to continue snapshot generating;
// 4. the generated snapshot should have a larger index than the latest `truncated_idx`.
#[test]
fn test_cancel_snapshot_generating() {
    let mut cluster = new_node_cluster(0, 5);
    cluster.cfg.raft_store.snap_mgr_gc_tick_interval = ReadableDuration(Duration::from_secs(100));
    cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(10);
    cluster.cfg.raft_store.raft_log_gc_count_limit = Some(10);
    cluster.cfg.raft_store.merge_max_log_gap = 5;
    let pd_client = Arc::clone(&cluster.pd_client);
    pd_client.disable_default_operator();
    let rid = cluster.run_conf_change();
    let snap_dir = cluster.get_snap_dir(1);
    pd_client.must_add_peer(rid, new_peer(2, 2));
    cluster.must_put(b"k0", b"v0");
    must_get_equal(&cluster.get_engine(2), b"k0", b"v0");
    pd_client.must_add_peer(rid, new_peer(3, 3));
    cluster.must_put(b"k1", b"v1");
    must_get_equal(&cluster.get_engine(3), b"k1", b"v1");
    // Remove snapshot files generated for initial configuration changes.
    for entry in fs::read_dir(&snap_dir).unwrap() {
        let entry = entry.unwrap();
        fs::remove_file(entry.path()).unwrap();
    }
    // Step 1: pause generating, then request a snapshot for the new learner.
    fail::cfg("before_region_gen_snap", "pause").unwrap();
    pd_client.must_add_peer(rid, new_learner_peer(4, 4));
    // Snapshot generatings will be canceled by raft log GC.
    let mut truncated_idx = cluster.truncated_state(rid, 1).get_index();
    truncated_idx += 20;
    // Step 2: append logs and wait for compaction past the paused snapshot.
    (0..20).for_each(|_| cluster.must_put(b"kk", b"vv"));
    cluster.wait_log_truncated(rid, 1, truncated_idx);
    // Step 3: resume generating.
    fail::cfg("before_region_gen_snap", "off").unwrap();
    // Wait for all snapshot generating tasks are consumed.
    thread::sleep(Duration::from_millis(100));
    // Step 4: new generated snapshot files should have a larger index than truncated index.
    for entry in fs::read_dir(&snap_dir).unwrap() {
        let entry = entry.unwrap();
        let path = entry.path();
        let file_name = path.file_name().unwrap().to_str().unwrap();
        if !file_name.ends_with(".meta") {
            continue;
        }
        // Meta file names: `gen_<region>_<term>_<index>.meta`.
        let parts: Vec<_> = file_name[0..file_name.len() - 5].split('_').collect();
        let snap_index = parts[3].parse::<u64>().unwrap();
        assert!(snap_index > truncated_idx);
    }
}
// Plants fake snapshot files and verifies GC eventually removes them even
// when `get_snapshot_for_gc` fails for some of them (idx 1 fails, idx 2
// succeeds under the "return(0)" failpoint).
#[test]
fn test_snapshot_gc_after_failed() {
    let mut cluster = new_server_cluster(0, 3);
    configure_for_snapshot(&mut cluster);
    cluster.cfg.raft_store.snap_gc_timeout = ReadableDuration::millis(300);
    let pd_client = Arc::clone(&cluster.pd_client);
    // Disable default max peer count check.
    pd_client.disable_default_operator();
    let r1 = cluster.run_conf_change();
    cluster.must_put(b"k1", b"v1");
    pd_client.must_add_peer(r1, new_peer(2, 2));
    must_get_equal(&cluster.get_engine(2), b"k1", b"v1");
    pd_client.must_add_peer(r1, new_peer(3, 3));
    let snap_dir = cluster.get_snap_dir(3);
    fail::cfg("get_snapshot_for_gc", "return(0)").unwrap();
    for idx in 1..3 {
        // idx 1 will fail in fail_point("get_snapshot_for_gc"), but idx 2 will succeed
        for suffix in &[".meta", "_default.sst"] {
            let f = format!("gen_{}_{}_{}{}", 2, 6, idx, suffix);
            let mut snap_file_path = PathBuf::from(&snap_dir);
            snap_file_path.push(&f);
            let snap_file_path = snap_file_path.as_path();
            let mut file = match File::create(&snap_file_path) {
                Err(why) => panic!("couldn't create {:?}: {}", snap_file_path, why),
                Ok(file) => file,
            };
            // write any data, in fact we don't check snapshot file corrupted or not in GC;
            if let Err(why) = file.write_all(b"some bytes") {
                panic!("couldn't write to {:?}: {}", snap_file_path, why)
            }
        }
    }
    // Poll until the GC-able snapshot (idx 2) disappears while the failing
    // one (idx 1) remains; give up after ~10s.
    let now = Instant::now();
    loop {
        let snap_keys = cluster.get_snap_mgr(3).list_idle_snap().unwrap();
        if snap_keys.is_empty() {
            panic!("no snapshot file is found");
        }
        let mut found_unexpected_file = false;
        let mut found_expected_file = false;
        for (snap_key, _is_sending) in snap_keys {
            if snap_key.region_id == 2 && snap_key.idx == 1 {
                found_expected_file = true;
            }
            if snap_key.idx == 2 && snap_key.region_id == 2 {
                if now.saturating_elapsed() > Duration::from_secs(10) {
                    panic!("unexpected snapshot file found. {:?}", snap_key);
                }
                found_unexpected_file = true;
                break;
            }
        }
        if !found_expected_file {
            panic!("The expected snapshot file is not found");
        }
        if !found_unexpected_file {
            break;
        }
        sleep_ms(400);
    }
    fail::cfg("get_snapshot_for_gc", "off").unwrap();
    cluster.sim.wl().clear_recv_filters(3);
}
/// A snapshot send that is aborted by a (simulated) network error on the
/// receiver must leave no partially-applied data and no dangling receiving
/// state in the receiver's snapshot manager.
#[test]
fn test_sending_fail_with_net_error() {
    let mut cluster = new_server_cluster(1, 2);
    configure_for_snapshot(&mut cluster);
    cluster.cfg.raft_store.snap_gc_timeout = ReadableDuration::millis(300);
    let pd_client = Arc::clone(&cluster.pd_client);
    // Disable default max peer count check.
    pd_client.disable_default_operator();
    let r1 = cluster.run_conf_change();
    cluster.must_put(b"k1", b"v1");
    let (send_tx, send_rx) = mpsc::sync_channel(1);
    // only send one MessageType::MsgSnapshot message
    cluster.sim.wl().add_send_filter(
        1,
        Box::new(
            RegionPacketFilter::new(r1, 1)
                .allow(1)
                .direction(Direction::Send)
                .msg_type(MessageType::MsgSnapshot)
                .set_msg_callback(Arc::new(move |m: &RaftMessage| {
                    // Notify the test body once the snapshot message leaves store 1.
                    if m.get_message().get_msg_type() == MessageType::MsgSnapshot {
                        let _ = send_tx.send(());
                    }
                })),
        ),
    );
    // peer2 will interrupt in receiving snapshot
    fail::cfg("receiving_snapshot_net_error", "return()").unwrap();
    pd_client.must_add_peer(r1, new_learner_peer(2, 2));
    // ready to send notify.
    send_rx.recv_timeout(Duration::from_secs(3)).unwrap();
    // need to wait receiver handle the snapshot request
    sleep_ms(100);
    // Peer 2 never finished receiving the snapshot, so it must not hold "k1",
    // and its snapshot manager must report zero in-flight receives.
    let engine2 = cluster.get_engine(2);
    must_get_none(&engine2, b"k1");
    assert_eq!(cluster.get_snap_mgr(2).stats().receiving_count, 0);
}
/// Log scans are now moved to the raftlog gc threads. This case tests whether
/// logs are still cleaned up when there are stale logs before the first index
/// while applying a snapshot. A gc task is expected to be scheduled after the
/// snapshot is applied.
#[test]
fn test_snapshot_clean_up_logs_with_unfinished_log_gc() {
    let mut cluster = new_node_cluster(0, 3);
    cluster.cfg.raft_store.raft_log_gc_count_limit = Some(15);
    cluster.cfg.raft_store.raft_log_gc_threshold = 15;
    // Speed up log gc.
    cluster.cfg.raft_store.raft_log_compact_sync_interval = ReadableDuration::millis(1);
    let pd_client = cluster.pd_client.clone();
    // Disable default max peer number check.
    pd_client.disable_default_operator();
    cluster.run();
    // Simulate raft log gc are pending in queue.
    let fp = "worker_gc_raft_log";
    fail::cfg(fp, "return(0)").unwrap();
    let state = cluster.truncated_state(1, 3);
    for i in 0..30 {
        let b = format!("k{}", i).into_bytes();
        cluster.must_put(&b, &b);
    }
    must_get_equal(&cluster.get_engine(3), b"k29", b"k29");
    cluster.wait_log_truncated(1, 3, state.get_index() + 1);
    cluster.stop_node(3);
    let truncated_index = cluster.truncated_state(1, 3).get_index();
    let raft_engine = cluster.engines[&3].raft.clone();
    // Make sure there are stale logs.
    raft_engine.get_entry(1, truncated_index).unwrap().unwrap();
    let last_index = cluster.raft_local_state(1, 3).get_last_index();
    for i in 30..60 {
        let b = format!("k{}", i).into_bytes();
        cluster.must_put(&b, &b);
    }
    // Wait until the live peers truncate past node 3's last index, so node 3
    // can only catch up via snapshot once restarted.
    cluster.wait_log_truncated(1, 2, last_index + 1);
    fail::remove(fp);
    // So peer (3, 3) will accept a snapshot. And all stale logs before first
    // index should be cleaned up.
    cluster.run_node(3).unwrap();
    must_get_equal(&cluster.get_engine(3), b"k59", b"k59");
    cluster.must_put(b"k60", b"v60");
    must_get_equal(&cluster.get_engine(3), b"k60", b"v60");
    let truncated_index = cluster.truncated_state(1, 3).get_index();
    let mut dest = vec![];
    raft_engine.get_all_entries_to(1, &mut dest).unwrap();
    // Only previous log should be cleaned up.
    assert!(dest[0].get_index() > truncated_index, "{:?}", dest);
}
| 36.506831 | 102 | 0.655241 |
e59900755bf3175c524ea797532c7ba70112f997 | 77,602 | #![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::de::{value, Deserializer, IntoDeserializer};
use serde::{Deserialize, Serialize, Serializer};
use std::str::FromStr;
/// Parameters supplied to the Create or Update access policy operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessPolicyCreateOrUpdateParameters {
    /// The access policy properties to create or update the resource with.
    pub properties: AccessPolicyResourceProperties,
}
impl AccessPolicyCreateOrUpdateParameters {
    /// Builds the request parameters from the required properties.
    pub fn new(properties: AccessPolicyResourceProperties) -> Self {
        Self { properties }
    }
}
#[doc = "The response of the List access policies operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccessPolicyListResponse {
    #[doc = "Result of the List access policies operation."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<AccessPolicyResource>,
}
impl AccessPolicyListResponse {
    /// Creates an empty list response.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that represents a set of mutable access policy resource properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccessPolicyMutableProperties {
    #[doc = "An description of the access policy."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[doc = "The list of roles the principal is assigned on the environment."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub roles: Vec<String>,
}
impl AccessPolicyMutableProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An access policy is used to grant users and applications access to the environment. Roles are assigned to service principals in Azure Active Directory. These roles define the actions the principal can perform through the Time Series Insights data plane APIs."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccessPolicyResource {
    #[serde(flatten)]
    pub resource: Resource,
    /// The access policy's properties, when present in the response.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AccessPolicyResourceProperties>,
}
impl AccessPolicyResource {
    /// Creates a resource with no properties set.
    pub fn new() -> Self {
        Self::default()
    }
}
/// Properties of an access policy resource: the principal it applies to, a
/// description, and the roles granted on the environment.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccessPolicyResourceProperties {
    #[doc = "The objectId of the principal in Azure Active Directory."]
    #[serde(rename = "principalObjectId", default, skip_serializing_if = "Option::is_none")]
    pub principal_object_id: Option<String>,
    #[doc = "An description of the access policy."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[doc = "The list of roles the principal is assigned on the environment."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub roles: Vec<String>,
}
impl AccessPolicyResourceProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
/// Parameters supplied to the Update access policy operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AccessPolicyUpdateParameters {
    #[doc = "An object that represents a set of mutable access policy resource properties."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AccessPolicyMutableProperties>,
}
impl AccessPolicyUpdateParameters {
    /// Creates update parameters with no properties set.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Properties of an event source that reads events from an event broker in Azure."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureEventSourceProperties {
    #[serde(flatten)]
    pub event_source_common_properties: EventSourceCommonProperties,
    #[doc = "The resource id of the event source in Azure Resource Manager."]
    #[serde(rename = "eventSourceResourceId")]
    pub event_source_resource_id: String,
}
impl AzureEventSourceProperties {
    /// Builds the properties from the event source's ARM resource id; the
    /// flattened common properties start out defaulted.
    pub fn new(event_source_resource_id: String) -> Self {
        Self {
            event_source_common_properties: EventSourceCommonProperties::default(),
            event_source_resource_id,
        }
    }
}
#[doc = "Contains information about an API error."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CloudError {
    #[doc = "Describes a particular API error with an error code and a message."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<CloudErrorBody>,
}
impl CloudError {
    /// Creates an error envelope with no body.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Describes a particular API error with an error code and a message."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CloudErrorBody {
    #[doc = "An error code that describes the error condition more precisely than an HTTP status code. Can be used to programmatically handle specific error cases."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[doc = "A message that describes the error in detail and provides debugging information."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[doc = "The target of the particular error (for example, the name of the property in error)."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[doc = "Contains nested errors that are related to this error."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<CloudErrorBody>,
}
impl CloudErrorBody {
    /// Creates an error body with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Properties required to create any resource tracked by Azure Resource Manager."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateOrUpdateTrackedResourceProperties {
    #[doc = "The location of the resource."]
    pub location: String,
    #[doc = "Key-value pairs of additional properties for the resource."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl CreateOrUpdateTrackedResourceProperties {
    /// Builds the properties from the required location; tags are left unset.
    pub fn new(location: String) -> Self {
        Self { location, tags: None }
    }
}
#[doc = "Parameters supplied to the CreateOrUpdate Environment operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentCreateOrUpdateParameters {
    #[serde(flatten)]
    pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
    #[doc = "The kind of the environment."]
    pub kind: environment_create_or_update_parameters::Kind,
    #[doc = "The sku determines the type of environment, either standard (S1 or S2) or long-term (L1). For standard environments the sku determines the capacity of the environment, the ingress rate, and the billing rate."]
    pub sku: Sku,
}
impl EnvironmentCreateOrUpdateParameters {
    /// Builds the request parameters from the required tracked-resource
    /// properties, environment kind, and sku.
    pub fn new(
        create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
        kind: environment_create_or_update_parameters::Kind,
        sku: Sku,
    ) -> Self {
        Self {
            create_or_update_tracked_resource_properties,
            kind,
            sku,
        }
    }
}
pub mod environment_create_or_update_parameters {
    use super::*;
    #[doc = "The kind of the environment."]
    // `remote = "Kind"` gives us a derived (de)serializer to delegate to while
    // the hand-written impls below intercept unknown strings.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Kind")]
    pub enum Kind {
        Standard,
        LongTerm,
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Kind {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Kind {
        // Strings matching no known variant become `UnknownValue` instead of a
        // deserialization error, keeping the client forward-compatible with
        // values added on the service side.
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Kind {
        // `UnknownValue` round-trips as the raw string it was parsed from.
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                Self::Standard => serializer.serialize_unit_variant("Kind", 0u32, "Standard"),
                Self::LongTerm => serializer.serialize_unit_variant("Kind", 1u32, "LongTerm"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "The response of the List Environments operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentListResponse {
    #[doc = "Result of the List Environments operation."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<EnvironmentResource>,
}
impl EnvironmentListResponse {
    /// Creates an empty list response.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EnvironmentResource {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[doc = "The sku determines the type of environment, either standard (S1 or S2) or long-term (L1). For standard environments the sku determines the capacity of the environment, the ingress rate, and the billing rate."]
    pub sku: Sku,
    #[doc = "The kind of the environment."]
    pub kind: environment_resource::Kind,
}
impl EnvironmentResource {
    /// Builds the resource from its required tracked-resource data, sku, and kind.
    pub fn new(tracked_resource: TrackedResource, sku: Sku, kind: environment_resource::Kind) -> Self {
        Self {
            tracked_resource,
            sku,
            kind,
        }
    }
}
pub mod environment_resource {
    use super::*;
    #[doc = "The kind of the environment."]
    // Closed enum: unlike the create/update `Kind`, this one has no
    // `UnknownValue` escape hatch, so unrecognized strings fail to deserialize.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        Standard,
        LongTerm,
    }
}
#[doc = "Properties of the environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentResourceProperties {
    #[serde(flatten)]
    pub resource_properties: ResourceProperties,
    #[doc = "An id used to access the environment data, e.g. to query the environment's events or upload reference data for the environment."]
    #[serde(rename = "dataAccessId", default, skip_serializing_if = "Option::is_none")]
    pub data_access_id: Option<String>,
    #[doc = "The fully qualified domain name used to access the environment data, e.g. to query the environment's events or upload reference data for the environment."]
    #[serde(rename = "dataAccessFqdn", default, skip_serializing_if = "Option::is_none")]
    pub data_access_fqdn: Option<String>,
    #[doc = "An object that represents the status of the environment, and its internal state in the Time Series Insights service."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<EnvironmentStatus>,
}
impl EnvironmentResourceProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that contains the details about an environment's state."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentStateDetails {
    #[doc = "Contains the code that represents the reason of an environment being in a particular state. Can be used to programmatically handle specific cases."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[doc = "A message that describes the state in detail."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
impl EnvironmentStateDetails {
    /// Creates state details with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that represents the status of the environment, and its internal state in the Time Series Insights service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentStatus {
    #[doc = "An object that represents the status of ingress on an environment."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ingress: Option<IngressEnvironmentStatus>,
    #[doc = "An object that represents the status of warm storage on an environment."]
    #[serde(rename = "warmStorage", default, skip_serializing_if = "Option::is_none")]
    pub warm_storage: Option<WarmStorageEnvironmentStatus>,
}
impl EnvironmentStatus {
    /// Creates a status with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Parameters supplied to the Update Environment operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentUpdateParameters {
    #[doc = "Key-value pairs of additional properties for the environment."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl EnvironmentUpdateParameters {
    /// Creates update parameters with no tags set.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Properties of the EventHub event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventHubEventSourceCommonProperties {
    #[serde(flatten)]
    pub azure_event_source_properties: AzureEventSourceProperties,
    #[doc = "The name of the service bus that contains the event hub."]
    #[serde(rename = "serviceBusNamespace")]
    pub service_bus_namespace: String,
    #[doc = "The name of the event hub."]
    #[serde(rename = "eventHubName")]
    pub event_hub_name: String,
    #[doc = "The name of the event hub's consumer group that holds the partitions from which events will be read."]
    #[serde(rename = "consumerGroupName")]
    pub consumer_group_name: String,
    #[doc = "The name of the SAS key that grants the Time Series Insights service access to the event hub. The shared access policies for this key must grant 'Listen' permissions to the event hub."]
    #[serde(rename = "keyName")]
    pub key_name: String,
}
impl EventHubEventSourceCommonProperties {
    /// Builds the properties from all required EventHub connection fields.
    pub fn new(
        azure_event_source_properties: AzureEventSourceProperties,
        service_bus_namespace: String,
        event_hub_name: String,
        consumer_group_name: String,
        key_name: String,
    ) -> Self {
        Self {
            azure_event_source_properties,
            service_bus_namespace,
            event_hub_name,
            consumer_group_name,
            key_name,
        }
    }
}
#[doc = "Parameters supplied to the Create or Update Event Source operation for an EventHub event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventHubEventSourceCreateOrUpdateParameters {
    #[serde(flatten)]
    pub event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters,
    #[doc = "Properties of the EventHub event source that are required on create or update requests."]
    pub properties: EventHubEventSourceCreationProperties,
}
impl EventHubEventSourceCreateOrUpdateParameters {
    /// Builds the request parameters from the shared create/update parameters
    /// and the EventHub-specific creation properties.
    pub fn new(
        event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters,
        properties: EventHubEventSourceCreationProperties,
    ) -> Self {
        Self {
            event_source_create_or_update_parameters,
            properties,
        }
    }
}
#[doc = "Properties of the EventHub event source that are required on create or update requests."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventHubEventSourceCreationProperties {
    #[serde(flatten)]
    pub event_hub_event_source_common_properties: EventHubEventSourceCommonProperties,
    #[doc = "The value of the shared access key that grants the Time Series Insights service read access to the event hub. This property is not shown in event source responses."]
    #[serde(rename = "sharedAccessKey")]
    pub shared_access_key: String,
}
impl EventHubEventSourceCreationProperties {
    /// Builds the creation properties from the common properties plus the
    /// write-only shared access key.
    pub fn new(event_hub_event_source_common_properties: EventHubEventSourceCommonProperties, shared_access_key: String) -> Self {
        Self {
            event_hub_event_source_common_properties,
            shared_access_key,
        }
    }
}
#[doc = "An object that represents a set of mutable EventHub event source resource properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventHubEventSourceMutableProperties {
    #[serde(flatten)]
    pub event_source_mutable_properties: EventSourceMutableProperties,
    #[doc = "The value of the shared access key that grants the Time Series Insights service read access to the event hub. This property is not shown in event source responses."]
    #[serde(rename = "sharedAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub shared_access_key: Option<String>,
}
impl EventHubEventSourceMutableProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An event source that receives its data from an Azure EventHub."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventHubEventSourceResource {
    #[serde(flatten)]
    pub event_source_resource: EventSourceResource,
    #[doc = "Properties of the EventHub event source resource."]
    pub properties: EventHubEventSourceResourceProperties,
}
impl EventHubEventSourceResource {
    /// Builds the resource from the base event source resource and the
    /// EventHub-specific properties.
    pub fn new(event_source_resource: EventSourceResource, properties: EventHubEventSourceResourceProperties) -> Self {
        Self {
            event_source_resource,
            properties,
        }
    }
}
#[doc = "Properties of the EventHub event source resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventHubEventSourceResourceProperties {
    #[serde(flatten)]
    pub event_hub_event_source_common_properties: EventHubEventSourceCommonProperties,
}
impl EventHubEventSourceResourceProperties {
    /// Wraps the common EventHub event source properties.
    pub fn new(event_hub_event_source_common_properties: EventHubEventSourceCommonProperties) -> Self {
        Self {
            event_hub_event_source_common_properties,
        }
    }
}
#[doc = "Parameters supplied to the Update Event Source operation to update an EventHub event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventHubEventSourceUpdateParameters {
    #[serde(flatten)]
    pub event_source_update_parameters: EventSourceUpdateParameters,
    #[doc = "An object that represents a set of mutable EventHub event source resource properties."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<EventHubEventSourceMutableProperties>,
}
impl EventHubEventSourceUpdateParameters {
    /// Creates update parameters with no fields set.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Properties of the event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventSourceCommonProperties {
    #[serde(flatten)]
    pub resource_properties: ResourceProperties,
    #[doc = "The event property that will be used as the event source's timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is specified, the event creation time will be used."]
    #[serde(rename = "timestampPropertyName", default, skip_serializing_if = "Option::is_none")]
    pub timestamp_property_name: Option<String>,
}
impl EventSourceCommonProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Parameters supplied to the Create or Update Event Source operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventSourceCreateOrUpdateParameters {
    #[serde(flatten)]
    pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
    #[doc = "The kind of the event source."]
    pub kind: event_source_create_or_update_parameters::Kind,
    #[doc = "An object that represents the local timestamp property. It contains the format of local timestamp that needs to be used and the corresponding timezone offset information. If a value isn't specified for localTimestamp, or if null, then the local timestamp will not be ingressed with the events."]
    #[serde(rename = "localTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub local_timestamp: Option<LocalTimestamp>,
}
impl EventSourceCreateOrUpdateParameters {
    /// Builds the request parameters from the required tracked-resource
    /// properties and event source kind; the local timestamp is left unset.
    pub fn new(
        create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
        kind: event_source_create_or_update_parameters::Kind,
    ) -> Self {
        Self {
            create_or_update_tracked_resource_properties,
            kind,
            local_timestamp: None,
        }
    }
}
pub mod event_source_create_or_update_parameters {
    use super::*;
    #[doc = "The kind of the event source."]
    // `remote = "Kind"` gives us a derived (de)serializer to delegate to while
    // the hand-written impls below intercept unknown strings.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Kind")]
    pub enum Kind {
        #[serde(rename = "Microsoft.EventHub")]
        MicrosoftEventHub,
        #[serde(rename = "Microsoft.IoTHub")]
        MicrosoftIoTHub,
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Kind {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Kind {
        // Strings matching no known variant become `UnknownValue` instead of a
        // deserialization error, keeping the client forward-compatible with
        // values added on the service side.
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Kind {
        // `UnknownValue` round-trips as the raw string it was parsed from.
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                Self::MicrosoftEventHub => serializer.serialize_unit_variant("Kind", 0u32, "Microsoft.EventHub"),
                Self::MicrosoftIoTHub => serializer.serialize_unit_variant("Kind", 1u32, "Microsoft.IoTHub"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "The response of the List EventSources operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventSourceListResponse {
    #[doc = "Result of the List EventSources operation."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<EventSourceResource>,
}
impl EventSourceListResponse {
    /// Creates an empty list response.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that represents a set of mutable event source resource properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventSourceMutableProperties {
    #[doc = "The event property that will be used as the event source's timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is specified, the event creation time will be used."]
    #[serde(rename = "timestampPropertyName", default, skip_serializing_if = "Option::is_none")]
    pub timestamp_property_name: Option<String>,
    #[doc = "An object that represents the local timestamp property. It contains the format of local timestamp that needs to be used and the corresponding timezone offset information. If a value isn't specified for localTimestamp, or if null, then the local timestamp will not be ingressed with the events."]
    #[serde(rename = "localTimestamp", default, skip_serializing_if = "Option::is_none")]
    pub local_timestamp: Option<LocalTimestamp>,
}
impl EventSourceMutableProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An environment receives data from one or more event sources. Each event source has associated connection info that allows the Time Series Insights ingress pipeline to connect to and pull data from the event source"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EventSourceResource {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[doc = "The kind of the event source."]
    pub kind: event_source_resource::Kind,
}
impl EventSourceResource {
    /// Builds the resource from its required tracked-resource data and kind.
    pub fn new(tracked_resource: TrackedResource, kind: event_source_resource::Kind) -> Self {
        Self { tracked_resource, kind }
    }
}
pub mod event_source_resource {
    use super::*;
    #[doc = "The kind of the event source."]
    // Closed enum: unlike the create/update `Kind`, this one has no
    // `UnknownValue` escape hatch, so unrecognized strings fail to deserialize.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Kind {
        #[serde(rename = "Microsoft.EventHub")]
        MicrosoftEventHub,
        #[serde(rename = "Microsoft.IoTHub")]
        MicrosoftIoTHub,
    }
}
#[doc = "Parameters supplied to the Update Event Source operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EventSourceUpdateParameters {
    #[doc = "Key-value pairs of additional properties for the event source."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl EventSourceUpdateParameters {
    /// Creates update parameters with no tags set.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that represents the status of ingress on an environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IngressEnvironmentStatus {
    #[doc = "This string represents the state of ingress operations on an environment. It can be \"Disabled\", \"Ready\", \"Running\", \"Paused\" or \"Unknown\""]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<ingress_environment_status::State>,
    #[doc = "An object that contains the details about an environment's state."]
    #[serde(rename = "stateDetails", default, skip_serializing_if = "Option::is_none")]
    pub state_details: Option<EnvironmentStateDetails>,
}
impl IngressEnvironmentStatus {
    /// Creates a status with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
pub mod ingress_environment_status {
    use super::*;
    #[doc = "This string represents the state of ingress operations on an environment. It can be \"Disabled\", \"Ready\", \"Running\", \"Paused\" or \"Unknown\""]
    // `remote = "State"` gives us a derived (de)serializer to delegate to while
    // the hand-written impls below intercept unknown strings.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "State")]
    pub enum State {
        Disabled,
        Ready,
        Running,
        Paused,
        Unknown,
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for State {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for State {
        // Strings matching no known variant become `UnknownValue` instead of a
        // deserialization error, keeping the client forward-compatible with
        // values added on the service side.
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for State {
        // `UnknownValue` round-trips as the raw string it was parsed from.
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                Self::Disabled => serializer.serialize_unit_variant("State", 0u32, "Disabled"),
                Self::Ready => serializer.serialize_unit_variant("State", 1u32, "Ready"),
                Self::Running => serializer.serialize_unit_variant("State", 2u32, "Running"),
                Self::Paused => serializer.serialize_unit_variant("State", 3u32, "Paused"),
                Self::Unknown => serializer.serialize_unit_variant("State", 4u32, "Unknown"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "Properties of the IoTHub event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IoTHubEventSourceCommonProperties {
    #[serde(flatten)]
    pub azure_event_source_properties: AzureEventSourceProperties,
    #[doc = "The name of the iot hub."]
    #[serde(rename = "iotHubName")]
    pub iot_hub_name: String,
    #[doc = "The name of the iot hub's consumer group that holds the partitions from which events will be read."]
    #[serde(rename = "consumerGroupName")]
    pub consumer_group_name: String,
    #[doc = "The name of the Shared Access Policy key that grants the Time Series Insights service access to the iot hub. This shared access policy key must grant 'service connect' permissions to the iot hub."]
    #[serde(rename = "keyName")]
    pub key_name: String,
}
impl IoTHubEventSourceCommonProperties {
    /// Builds the properties from all required IoT hub connection fields.
    pub fn new(
        azure_event_source_properties: AzureEventSourceProperties,
        iot_hub_name: String,
        consumer_group_name: String,
        key_name: String,
    ) -> Self {
        Self {
            azure_event_source_properties,
            iot_hub_name,
            consumer_group_name,
            key_name,
        }
    }
}
#[doc = "Parameters supplied to the Create or Update Event Source operation for an IoTHub event source."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IoTHubEventSourceCreateOrUpdateParameters {
    #[serde(flatten)]
    pub event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters,
    #[doc = "Properties of the IoTHub event source that are required on create or update requests."]
    pub properties: IoTHubEventSourceCreationProperties,
}
impl IoTHubEventSourceCreateOrUpdateParameters {
    /// Builds the request parameters from the shared create/update parameters
    /// and the IoTHub-specific creation properties.
    pub fn new(
        event_source_create_or_update_parameters: EventSourceCreateOrUpdateParameters,
        properties: IoTHubEventSourceCreationProperties,
    ) -> Self {
        Self {
            event_source_create_or_update_parameters,
            properties,
        }
    }
}
#[doc = "Properties of the IoTHub event source that are required on create or update requests."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IoTHubEventSourceCreationProperties {
    #[serde(flatten)]
    pub io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties,
    #[doc = "The value of the Shared Access Policy key that grants the Time Series Insights service read access to the iot hub. This property is not shown in event source responses."]
    #[serde(rename = "sharedAccessKey")]
    pub shared_access_key: String,
}
impl IoTHubEventSourceCreationProperties {
    /// Builds the creation properties from the common properties plus the
    /// write-only shared access key.
    pub fn new(io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties, shared_access_key: String) -> Self {
        Self {
            io_t_hub_event_source_common_properties,
            shared_access_key,
        }
    }
}
#[doc = "An object that represents a set of mutable IoTHub event source resource properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IoTHubEventSourceMutableProperties {
    #[serde(flatten)]
    pub event_source_mutable_properties: EventSourceMutableProperties,
    #[doc = "The value of the shared access key that grants the Time Series Insights service read access to the iot hub. This property is not shown in event source responses."]
    #[serde(rename = "sharedAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub shared_access_key: Option<String>,
}
impl IoTHubEventSourceMutableProperties {
    /// Creates properties with all fields unset.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An event source that receives its data from an Azure IoTHub."]
// Response/resource model: flattened base resource fields plus required properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IoTHubEventSourceResource {
    #[serde(flatten)]
    pub event_source_resource: EventSourceResource,
    #[doc = "Properties of the IoTHub event source resource."]
    pub properties: IoTHubEventSourceResourceProperties,
}
impl IoTHubEventSourceResource {
    // Both components are required, so `new` takes each explicitly.
    pub fn new(event_source_resource: EventSourceResource, properties: IoTHubEventSourceResourceProperties) -> Self {
        Self {
            event_source_resource,
            properties,
        }
    }
}
#[doc = "Properties of the IoTHub event source resource."]
// Thin wrapper that flattens the common IoTHub event source properties into
// this struct's JSON object; it carries no fields of its own.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IoTHubEventSourceResourceProperties {
    #[serde(flatten)]
    pub io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties,
}
impl IoTHubEventSourceResourceProperties {
    // Single required component.
    pub fn new(io_t_hub_event_source_common_properties: IoTHubEventSourceCommonProperties) -> Self {
        Self {
            io_t_hub_event_source_common_properties,
        }
    }
}
#[doc = "Parameters supplied to the Update Event Source operation to update an IoTHub event source."]
// Patch-style payload: `properties` is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IoTHubEventSourceUpdateParameters {
    #[serde(flatten)]
    pub event_source_update_parameters: EventSourceUpdateParameters,
    #[doc = "An object that represents a set of mutable IoTHub event source resource properties."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IoTHubEventSourceMutableProperties>,
}
impl IoTHubEventSourceUpdateParameters {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that represents the local timestamp property. It contains the format of local timestamp that needs to be used and the corresponding timezone offset information. If a value isn't specified for localTimestamp, or if null, then the local timestamp will not be ingressed with the events."]
// Both fields are optional; see the nested `local_timestamp` module for the
// `Format` enum and `TimeZoneOffset` types referenced here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LocalTimestamp {
    #[doc = "An enum that represents the format of the local timestamp property that needs to be set."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<local_timestamp::Format>,
    #[doc = "An object that represents the offset information for the local timestamp format specified. Should not be specified for LocalTimestampFormat - Embedded."]
    #[serde(rename = "timeZoneOffset", default, skip_serializing_if = "Option::is_none")]
    pub time_zone_offset: Option<local_timestamp::TimeZoneOffset>,
}
impl LocalTimestamp {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
// Support types for `LocalTimestamp`.
pub mod local_timestamp {
    use super::*;
    #[doc = "An enum that represents the format of the local timestamp property that needs to be set."]
    // `remote = "Format"` derives Serialize/Deserialize against a shadow type so the
    // manual impls below can add pass-through handling for unrecognized wire values.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Format")]
    pub enum Format {
        Embedded,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Format {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here
            // and the Deserialize impl below converts that into `UnknownValue`.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Format {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of a deserialization error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Format {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                // Known variants use their fixed wire names; unknown values round-trip verbatim.
                Self::Embedded => serializer.serialize_unit_variant("Format", 0u32, "Embedded"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
    #[doc = "An object that represents the offset information for the local timestamp format specified. Should not be specified for LocalTimestampFormat - Embedded."]
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
    pub struct TimeZoneOffset {
        #[doc = "The event property that will be contain the offset information to calculate the local timestamp. When the LocalTimestampFormat is Iana, the property name will contain the name of the column which contains IANA Timezone Name (eg: Americas/Los Angeles). When LocalTimestampFormat is Timespan, it contains the name of property which contains values representing the offset (eg: P1D or 1.00:00:00)"]
        #[serde(rename = "propertyName", default, skip_serializing_if = "Option::is_none")]
        pub property_name: Option<String>,
    }
    impl TimeZoneOffset {
        // All fields are optional; construction is equivalent to `Default::default()`.
        pub fn new() -> Self {
            Self::default()
        }
    }
}
#[doc = "Parameters supplied to the Create or Update Environment operation for a long-term environment."]
// Request body: flattened common environment parameters plus long-term-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermEnvironmentCreateOrUpdateParameters {
    #[serde(flatten)]
    pub environment_create_or_update_parameters: EnvironmentCreateOrUpdateParameters,
    #[doc = "Properties used to create a long-term environment."]
    pub properties: LongTermEnvironmentCreationProperties,
}
impl LongTermEnvironmentCreateOrUpdateParameters {
    // Both components are required, so `new` takes each explicitly.
    pub fn new(
        environment_create_or_update_parameters: EnvironmentCreateOrUpdateParameters,
        properties: LongTermEnvironmentCreationProperties,
    ) -> Self {
        Self {
            environment_create_or_update_parameters,
            properties,
        }
    }
}
#[doc = "Properties used to create a long-term environment."]
// Required: time series id properties and storage configuration.
// Optional: warm store configuration (omitted from JSON when `None`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermEnvironmentCreationProperties {
    #[doc = "The list of event properties which will be used to define the environment's time series id."]
    #[serde(rename = "timeSeriesIdProperties")]
    pub time_series_id_properties: Vec<TimeSeriesIdProperty>,
    #[doc = "The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data."]
    #[serde(rename = "storageConfiguration")]
    pub storage_configuration: LongTermStorageConfigurationInput,
    #[doc = "The warm store configuration provides the details to create a warm store cache that will retain a copy of the environment's data available for faster query."]
    #[serde(rename = "warmStoreConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub warm_store_configuration: Option<WarmStoreConfigurationProperties>,
}
impl LongTermEnvironmentCreationProperties {
    // `new` takes only the required fields; the warm store starts unset.
    pub fn new(time_series_id_properties: Vec<TimeSeriesIdProperty>, storage_configuration: LongTermStorageConfigurationInput) -> Self {
        Self {
            time_series_id_properties,
            storage_configuration,
            warm_store_configuration: None,
        }
    }
}
#[doc = "An object that represents a set of mutable long-term environment resource properties."]
// Patch-style payload: every field is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LongTermEnvironmentMutableProperties {
    #[doc = "The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data."]
    #[serde(rename = "storageConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub storage_configuration: Option<LongTermStorageConfigurationMutableProperties>,
    #[doc = "The warm store configuration provides the details to create a warm store cache that will retain a copy of the environment's data available for faster query."]
    #[serde(rename = "warmStoreConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub warm_store_configuration: Option<WarmStoreConfigurationProperties>,
}
impl LongTermEnvironmentMutableProperties {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. LongTerm environments do not have set data retention limits."]
// Response/resource model: flattened base environment resource plus required properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermEnvironmentResource {
    #[serde(flatten)]
    pub environment_resource: EnvironmentResource,
    #[doc = "Properties of the long-term environment."]
    pub properties: LongTermEnvironmentResourceProperties,
}
impl LongTermEnvironmentResource {
    // Both components are required, so `new` takes each explicitly.
    pub fn new(environment_resource: EnvironmentResource, properties: LongTermEnvironmentResourceProperties) -> Self {
        Self {
            environment_resource,
            properties,
        }
    }
}
#[doc = "Properties of the long-term environment."]
// Combines two flattened property sets with the long-term-specific required
// fields; `new` defaults the flattened parts and leaves the warm store unset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermEnvironmentResourceProperties {
    #[serde(flatten)]
    pub environment_resource_properties: EnvironmentResourceProperties,
    #[serde(flatten)]
    pub resource_properties: ResourceProperties,
    #[doc = "The list of event properties which will be used to define the environment's time series id."]
    #[serde(rename = "timeSeriesIdProperties")]
    pub time_series_id_properties: Vec<TimeSeriesIdProperty>,
    #[doc = "The storage configuration provides the non-secret connection details about the customer storage account that is used to store the environment's data."]
    #[serde(rename = "storageConfiguration")]
    pub storage_configuration: LongTermStorageConfigurationOutput,
    #[doc = "The warm store configuration provides the details to create a warm store cache that will retain a copy of the environment's data available for faster query."]
    #[serde(rename = "warmStoreConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub warm_store_configuration: Option<WarmStoreConfigurationProperties>,
}
impl LongTermEnvironmentResourceProperties {
    // Only the required fields are parameters; the rest take their defaults.
    pub fn new(time_series_id_properties: Vec<TimeSeriesIdProperty>, storage_configuration: LongTermStorageConfigurationOutput) -> Self {
        Self {
            environment_resource_properties: EnvironmentResourceProperties::default(),
            resource_properties: ResourceProperties::default(),
            time_series_id_properties,
            storage_configuration,
            warm_store_configuration: None,
        }
    }
}
#[doc = "Parameters supplied to the Update Environment operation to update a long-term environment."]
// Patch-style payload: `properties` is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LongTermEnvironmentUpdateParameters {
    #[serde(flatten)]
    pub environment_update_parameters: EnvironmentUpdateParameters,
    #[doc = "An object that represents a set of mutable long-term environment resource properties."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LongTermEnvironmentMutableProperties>,
}
impl LongTermEnvironmentUpdateParameters {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data."]
// Write-only secret carrier: `managementKey` is sent on requests but, per the
// doc string, is never echoed back in environment responses.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermStorageConfigurationInput {
    #[doc = "The name of the storage account that will hold the environment's long term data."]
    #[serde(rename = "accountName")]
    pub account_name: String,
    #[doc = "The value of the management key that grants the Time Series Insights service write access to the storage account. This property is not shown in environment responses."]
    #[serde(rename = "managementKey")]
    pub management_key: String,
}
impl LongTermStorageConfigurationInput {
pub fn new(account_name: String, management_key: String) -> Self {
Self {
account_name,
management_key,
}
}
}
#[doc = "The storage configuration provides the connection details that allows the Time Series Insights service to connect to the customer storage account that is used to store the environment's data."]
// Update payload carrying only the rotatable secret (the account name is immutable here).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermStorageConfigurationMutableProperties {
    #[doc = "The value of the management key that grants the Time Series Insights service write access to the storage account. This property is not shown in environment responses."]
    #[serde(rename = "managementKey")]
    pub management_key: String,
}
impl LongTermStorageConfigurationMutableProperties {
pub fn new(management_key: String) -> Self {
Self { management_key }
}
}
#[doc = "The storage configuration provides the non-secret connection details about the customer storage account that is used to store the environment's data."]
// Response-side counterpart of the storage configuration: only the non-secret
// account name is returned (the management key is write-only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LongTermStorageConfigurationOutput {
    #[doc = "The name of the storage account that will hold the environment's long term data."]
    #[serde(rename = "accountName")]
    pub account_name: String,
}
impl LongTermStorageConfigurationOutput {
pub fn new(account_name: String) -> Self {
Self { account_name }
}
}
#[doc = "A Time Series Insights REST API operation"]
// Discovery model returned by the provider's operations listing; see the nested
// `operation` module for the `Display` type referenced here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Operation {
    #[doc = "The name of the operation being performed on this particular object."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[doc = "Contains the localized display information for this particular operation / action."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
impl Operation {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
// Support types for `Operation`.
pub mod operation {
    use super::*;
    #[doc = "Contains the localized display information for this particular operation / action."]
    // All fields are optional, localized display strings; each is omitted from
    // JSON when `None`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
    pub struct Display {
        #[doc = "The localized friendly form of the resource provider name."]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[doc = "The localized friendly form of the resource type related to this action/operation."]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[doc = "The localized friendly name for the operation."]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[doc = "The localized friendly description for the operation."]
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
    impl Display {
        // All fields are optional; construction is equivalent to `Default::default()`.
        pub fn new() -> Self {
            Self::default()
        }
    }
}
#[doc = "Result of the request to list Time Series Insights operations. It contains a list of operations and a URL link to get the next set of results."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationListResult {
    #[doc = "List of Time Series Insights operations supported by the Microsoft.TimeSeriesInsights resource provider."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[doc = "URL to get the next set of operation list results if there are any."]
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
// Paging support: the pipeline keeps fetching while `nextLink` is present.
impl azure_core::Continuable for OperationListResult {
    fn continuation(&self) -> Option<String> {
        self.next_link.clone()
    }
}
impl OperationListResult {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Provisioning state of the resource."]
// `remote = "ProvisioningState"` derives Serialize/Deserialize against a shadow
// type so the manual impls below can pass through unrecognized wire values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(remote = "ProvisioningState")]
pub enum ProvisioningState {
    Accepted,
    Creating,
    Updating,
    Succeeded,
    Failed,
    Deleting,
    // Captures any value the service returns that this client version does not know.
    #[serde(skip_deserializing)]
    UnknownValue(String),
}
impl FromStr for ProvisioningState {
    type Err = value::Error;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        // Delegates to the derived (remote) deserializer; unknown strings error here
        // and the Deserialize impl below converts that into `UnknownValue`.
        Self::deserialize(s.into_deserializer())
    }
}
impl<'de> Deserialize<'de> for ProvisioningState {
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        // Unrecognized values become `UnknownValue` instead of a deserialization error.
        let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
        Ok(deserialized)
    }
}
impl Serialize for ProvisioningState {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Known variants use their fixed wire names; unknown values round-trip verbatim.
        match self {
            Self::Accepted => serializer.serialize_unit_variant("ProvisioningState", 0u32, "Accepted"),
            Self::Creating => serializer.serialize_unit_variant("ProvisioningState", 1u32, "Creating"),
            Self::Updating => serializer.serialize_unit_variant("ProvisioningState", 2u32, "Updating"),
            Self::Succeeded => serializer.serialize_unit_variant("ProvisioningState", 3u32, "Succeeded"),
            Self::Failed => serializer.serialize_unit_variant("ProvisioningState", 4u32, "Failed"),
            Self::Deleting => serializer.serialize_unit_variant("ProvisioningState", 5u32, "Deleting"),
            Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
        }
    }
}
// Request body for creating/updating a reference data set; flattens the common
// tracked-resource fields and adds the required creation properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReferenceDataSetCreateOrUpdateParameters {
    #[serde(flatten)]
    pub create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
    #[doc = "Properties used to create a reference data set."]
    pub properties: ReferenceDataSetCreationProperties,
}
impl ReferenceDataSetCreateOrUpdateParameters {
    // Both components are required, so `new` takes each explicitly.
    pub fn new(
        create_or_update_tracked_resource_properties: CreateOrUpdateTrackedResourceProperties,
        properties: ReferenceDataSetCreationProperties,
    ) -> Self {
        Self {
            create_or_update_tracked_resource_properties,
            properties,
        }
    }
}
#[doc = "Properties used to create a reference data set."]
// Required: the key properties. Optional: the key comparison behavior
// (service default is 'Ordinal' per the field's doc string).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReferenceDataSetCreationProperties {
    #[doc = "The list of key properties for the reference data set."]
    #[serde(rename = "keyProperties")]
    pub key_properties: Vec<ReferenceDataSetKeyProperty>,
    #[doc = "The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used."]
    #[serde(rename = "dataStringComparisonBehavior", default, skip_serializing_if = "Option::is_none")]
    pub data_string_comparison_behavior: Option<reference_data_set_creation_properties::DataStringComparisonBehavior>,
}
impl ReferenceDataSetCreationProperties {
    // `new` takes only the required key properties; the comparison behavior
    // starts unset so the service applies its default.
    pub fn new(key_properties: Vec<ReferenceDataSetKeyProperty>) -> Self {
        Self {
            key_properties,
            data_string_comparison_behavior: None,
        }
    }
}
// Support types for `ReferenceDataSetCreationProperties`.
pub mod reference_data_set_creation_properties {
    use super::*;
    #[doc = "The reference data set key comparison behavior can be set using this property. By default, the value is 'Ordinal' - which means case sensitive key comparison will be performed while joining reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set, case insensitive comparison will be used."]
    // `remote` + manual impls below let unrecognized service values pass through
    // as `UnknownValue` instead of failing deserialization.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "DataStringComparisonBehavior")]
    pub enum DataStringComparisonBehavior {
        Ordinal,
        OrdinalIgnoreCase,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for DataStringComparisonBehavior {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for DataStringComparisonBehavior {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of an error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for DataStringComparisonBehavior {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Known variants use their fixed wire names; unknown values round-trip verbatim.
            match self {
                Self::Ordinal => serializer.serialize_unit_variant("DataStringComparisonBehavior", 0u32, "Ordinal"),
                Self::OrdinalIgnoreCase => serializer.serialize_unit_variant("DataStringComparisonBehavior", 1u32, "OrdinalIgnoreCase"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "A key property for the reference data set. A reference data set can have multiple key properties."]
// `type_` is renamed to the wire name `type` (a Rust keyword); see the nested
// `reference_data_set_key_property` module for the `Type` enum.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ReferenceDataSetKeyProperty {
    #[doc = "The name of the key property."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[doc = "The type of the key property."]
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<reference_data_set_key_property::Type>,
}
impl ReferenceDataSetKeyProperty {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
// Support types for `ReferenceDataSetKeyProperty`.
pub mod reference_data_set_key_property {
    use super::*;
    #[doc = "The type of the key property."]
    // `remote` + manual impls below let unrecognized service values pass through
    // as `UnknownValue` instead of failing deserialization.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Type")]
    pub enum Type {
        String,
        Double,
        Bool,
        DateTime,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Type {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Type {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of an error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Type {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Known variants use their fixed wire names; unknown values round-trip verbatim.
            match self {
                Self::String => serializer.serialize_unit_variant("Type", 0u32, "String"),
                Self::Double => serializer.serialize_unit_variant("Type", 1u32, "Double"),
                Self::Bool => serializer.serialize_unit_variant("Type", 2u32, "Bool"),
                Self::DateTime => serializer.serialize_unit_variant("Type", 3u32, "DateTime"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "The response of the List Reference Data Sets operation."]
// Non-paged list response: there is no nextLink field here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ReferenceDataSetListResponse {
    #[doc = "Result of the List Reference Data Sets operation."]
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ReferenceDataSetResource>,
}
impl ReferenceDataSetListResponse {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "A reference data set provides metadata about the events in an environment. Metadata in the reference data set will be joined with events as they are read from event sources. The metadata that makes up the reference data set is uploaded or modified through the Time Series Insights data plane APIs."]
// Resource model: flattened tracked-resource fields plus optional properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReferenceDataSetResource {
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[doc = "Properties of the reference data set."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ReferenceDataSetResourceProperties>,
}
impl ReferenceDataSetResource {
    // `new` takes only the required tracked resource; properties start unset.
    pub fn new(tracked_resource: TrackedResource) -> Self {
        Self {
            tracked_resource,
            properties: None,
        }
    }
}
#[doc = "Properties of the reference data set."]
// Combines the creation properties with the common (read-only) resource
// properties; both sets are flattened into one JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReferenceDataSetResourceProperties {
    #[serde(flatten)]
    pub reference_data_set_creation_properties: ReferenceDataSetCreationProperties,
    #[serde(flatten)]
    pub resource_properties: ResourceProperties,
}
impl ReferenceDataSetResourceProperties {
    // `new` takes the required creation properties and defaults the rest.
    pub fn new(reference_data_set_creation_properties: ReferenceDataSetCreationProperties) -> Self {
        Self {
            reference_data_set_creation_properties,
            resource_properties: ResourceProperties::default(),
        }
    }
}
#[doc = "Parameters supplied to the Update Reference Data Set operation."]
// Only tags can be updated on a reference data set; `tags` is a free-form JSON
// object of string key-value pairs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ReferenceDataSetUpdateParameters {
    #[doc = "Key-value pairs of additional properties for the reference data set."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl ReferenceDataSetUpdateParameters {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Time Series Insights resource"]
// Base ARM resource identity (id/name/type); all fields are server-populated
// and optional on the wire. `type_` is renamed to the wire name `type`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Resource {
    #[doc = "Resource Id"]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[doc = "Resource name"]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[doc = "Resource type"]
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
impl Resource {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "Properties that are common to all tracked resources."]
// Server-populated status fields shared by tracked resources.
// NOTE(review): `creationTime` is kept as a raw string here — presumably an
// ISO-8601 timestamp, but this model does not parse it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceProperties {
    #[doc = "Provisioning state of the resource."]
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ProvisioningState>,
    #[doc = "The time the resource was created."]
    #[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<String>,
}
impl ResourceProperties {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "The sku determines the type of environment, either standard (S1 or S2) or long-term (L1). For standard environments the sku determines the capacity of the environment, the ingress rate, and the billing rate."]
// Both fields are required on the wire; see the nested `sku` module for `Name`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    #[doc = "The name of this SKU."]
    pub name: sku::Name,
    #[doc = "The capacity of the sku. For standard environments, this value can be changed to support scale out of environments after they have been created."]
    pub capacity: i32,
}
impl Sku {
pub fn new(name: sku::Name, capacity: i32) -> Self {
Self { name, capacity }
}
}
// Support types for `Sku`.
pub mod sku {
    use super::*;
    #[doc = "The name of this SKU."]
    // `remote` + manual impls below let unrecognized service values pass through
    // as `UnknownValue` instead of failing deserialization.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Name")]
    pub enum Name {
        S1,
        S2,
        P1,
        L1,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Name {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Name {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of an error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Name {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Known variants use their fixed wire names; unknown values round-trip verbatim.
            match self {
                Self::S1 => serializer.serialize_unit_variant("Name", 0u32, "S1"),
                Self::S2 => serializer.serialize_unit_variant("Name", 1u32, "S2"),
                Self::P1 => serializer.serialize_unit_variant("Name", 2u32, "P1"),
                Self::L1 => serializer.serialize_unit_variant("Name", 3u32, "L1"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "Parameters supplied to the Create or Update Environment operation for a standard environment."]
// Request body: flattened common environment parameters plus standard-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StandardEnvironmentCreateOrUpdateParameters {
    #[serde(flatten)]
    pub environment_create_or_update_parameters: EnvironmentCreateOrUpdateParameters,
    #[doc = "Properties used to create a standard environment."]
    pub properties: StandardEnvironmentCreationProperties,
}
impl StandardEnvironmentCreateOrUpdateParameters {
    // Both components are required, so `new` takes each explicitly.
    pub fn new(
        environment_create_or_update_parameters: EnvironmentCreateOrUpdateParameters,
        properties: StandardEnvironmentCreationProperties,
    ) -> Self {
        Self {
            environment_create_or_update_parameters,
            properties,
        }
    }
}
#[doc = "Properties used to create a standard environment."]
// Required: the ISO8601 data retention timespan. Optional: overflow behavior
// and partition key properties (empty list is omitted from JSON).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StandardEnvironmentCreationProperties {
    #[doc = "ISO8601 timespan specifying the minimum number of days the environment's events will be available for query."]
    #[serde(rename = "dataRetentionTime")]
    pub data_retention_time: String,
    #[doc = "The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If \"PauseIngress\" is specified, new events will not be read from the event source. If \"PurgeOldData\" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData."]
    #[serde(rename = "storageLimitExceededBehavior", default, skip_serializing_if = "Option::is_none")]
    pub storage_limit_exceeded_behavior: Option<standard_environment_creation_properties::StorageLimitExceededBehavior>,
    #[doc = "The list of event properties which will be used to partition data in the environment. Currently, only a single partition key property is supported."]
    #[serde(rename = "partitionKeyProperties", default, skip_serializing_if = "Vec::is_empty")]
    pub partition_key_properties: Vec<TimeSeriesIdProperty>,
}
impl StandardEnvironmentCreationProperties {
    // `new` takes only the required retention timespan; the rest start empty/unset.
    pub fn new(data_retention_time: String) -> Self {
        Self {
            data_retention_time,
            storage_limit_exceeded_behavior: None,
            partition_key_properties: Vec::new(),
        }
    }
}
// Support types for `StandardEnvironmentCreationProperties`.
pub mod standard_environment_creation_properties {
    use super::*;
    #[doc = "The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If \"PauseIngress\" is specified, new events will not be read from the event source. If \"PurgeOldData\" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData."]
    // `remote` + manual impls below let unrecognized service values pass through
    // as `UnknownValue` instead of failing deserialization.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "StorageLimitExceededBehavior")]
    pub enum StorageLimitExceededBehavior {
        PurgeOldData,
        PauseIngress,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for StorageLimitExceededBehavior {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for StorageLimitExceededBehavior {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of an error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for StorageLimitExceededBehavior {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Known variants use their fixed wire names; unknown values round-trip verbatim.
            match self {
                Self::PurgeOldData => serializer.serialize_unit_variant("StorageLimitExceededBehavior", 0u32, "PurgeOldData"),
                Self::PauseIngress => serializer.serialize_unit_variant("StorageLimitExceededBehavior", 1u32, "PauseIngress"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "An object that represents a set of mutable standard environment resource properties."]
// Patch-style payload: every field is optional and omitted from JSON when `None`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct StandardEnvironmentMutableProperties {
    #[doc = "ISO8601 timespan specifying the minimum number of days the environment's events will be available for query."]
    #[serde(rename = "dataRetentionTime", default, skip_serializing_if = "Option::is_none")]
    pub data_retention_time: Option<String>,
    #[doc = "The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If \"PauseIngress\" is specified, new events will not be read from the event source. If \"PurgeOldData\" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData."]
    #[serde(rename = "storageLimitExceededBehavior", default, skip_serializing_if = "Option::is_none")]
    pub storage_limit_exceeded_behavior: Option<standard_environment_mutable_properties::StorageLimitExceededBehavior>,
}
impl StandardEnvironmentMutableProperties {
    // All fields are optional; construction is equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self::default()
    }
}
// Support types for `StandardEnvironmentMutableProperties`. This duplicates the
// enum in `standard_environment_creation_properties`; each generated parent type
// gets its own nested module, so the two enums are distinct Rust types.
pub mod standard_environment_mutable_properties {
    use super::*;
    #[doc = "The behavior the Time Series Insights service should take when the environment's capacity has been exceeded. If \"PauseIngress\" is specified, new events will not be read from the event source. If \"PurgeOldData\" is specified, new events will continue to be read and old events will be deleted from the environment. The default behavior is PurgeOldData."]
    // `remote` + manual impls below let unrecognized service values pass through
    // as `UnknownValue` instead of failing deserialization.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "StorageLimitExceededBehavior")]
    pub enum StorageLimitExceededBehavior {
        PurgeOldData,
        PauseIngress,
        // Captures any value the service returns that this client version does not know.
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for StorageLimitExceededBehavior {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            // Delegates to the derived (remote) deserializer; unknown strings error here.
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for StorageLimitExceededBehavior {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unrecognized values become `UnknownValue` instead of an error.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for StorageLimitExceededBehavior {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Known variants use their fixed wire names; unknown values round-trip verbatim.
            match self {
                Self::PurgeOldData => serializer.serialize_unit_variant("StorageLimitExceededBehavior", 0u32, "PurgeOldData"),
                Self::PauseIngress => serializer.serialize_unit_variant("StorageLimitExceededBehavior", 1u32, "PauseIngress"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource. Standard environments have data retention limits."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StandardEnvironmentResource {
    // `flatten` inlines the shared environment-resource fields next to
    // `properties` in the serialized form.
    #[serde(flatten)]
    pub environment_resource: EnvironmentResource,
    #[doc = "Properties of the standard environment."]
    pub properties: StandardEnvironmentResourceProperties,
}
impl StandardEnvironmentResource {
    // Both fields are required (no `Default` derive), so the constructor
    // takes them explicitly.
    pub fn new(environment_resource: EnvironmentResource, properties: StandardEnvironmentResourceProperties) -> Self {
        Self {
            environment_resource,
            properties,
        }
    }
}
#[doc = "Properties of the standard environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StandardEnvironmentResourceProperties {
    // Composed entirely of flattened parts: creation properties plus the
    // shared environment/resource property sets.
    #[serde(flatten)]
    pub standard_environment_creation_properties: StandardEnvironmentCreationProperties,
    #[serde(flatten)]
    pub environment_resource_properties: EnvironmentResourceProperties,
    #[serde(flatten)]
    pub resource_properties: ResourceProperties,
}
impl StandardEnvironmentResourceProperties {
    // Only the creation properties are required; the other flattened parts
    // start at their defaults.
    pub fn new(standard_environment_creation_properties: StandardEnvironmentCreationProperties) -> Self {
        Self {
            standard_environment_creation_properties,
            environment_resource_properties: EnvironmentResourceProperties::default(),
            resource_properties: ResourceProperties::default(),
        }
    }
}
#[doc = "Parameters supplied to the Update Environment operation to update a standard environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct StandardEnvironmentUpdateParameters {
    #[serde(flatten)]
    pub environment_update_parameters: EnvironmentUpdateParameters,
    #[doc = "The sku determines the type of environment, either standard (S1 or S2) or long-term (L1). For standard environments the sku determines the capacity of the environment, the ingress rate, and the billing rate."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[doc = "An object that represents a set of mutable standard environment resource properties."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<StandardEnvironmentMutableProperties>,
}
impl StandardEnvironmentUpdateParameters {
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "The structure of the property that a time series id can have. An environment can have multiple such properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct TimeSeriesIdProperty {
    #[doc = "The name of the property."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[doc = "The type of the property."]
    // `type` is a Rust keyword, hence the `type_` field name plus the rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<time_series_id_property::Type>,
}
impl TimeSeriesIdProperty {
    pub fn new() -> Self {
        Self::default()
    }
}
pub mod time_series_id_property {
    use super::*;
    #[doc = "The type of the property."]
    // Same remote-derive pattern as the other generated enums: the derive
    // targets this type, and the manual impls below add an unknown-value
    // fallback.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "Type")]
    pub enum Type {
        String,
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for Type {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for Type {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unknown strings are preserved instead of failing deserialization.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for Type {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                Self::String => serializer.serialize_unit_variant("Type", 0u32, "String"),
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "Time Series Insights resource that is tracked by Azure Resource Manager."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[doc = "Resource location"]
    pub location: String,
    #[doc = "Resource tags"]
    // Free-form JSON object; keys and values are not constrained by this model.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
impl TrackedResource {
    // `location` is the only required field; the flattened `resource` part
    // and `tags` start empty.
    pub fn new(location: String) -> Self {
        Self {
            resource: Resource::default(),
            location,
            tags: None,
        }
    }
}
#[doc = "An object that represents the status of warm storage on an environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WarmStorageEnvironmentStatus {
    #[doc = "An object that contains the status of warm storage properties usage."]
    #[serde(rename = "propertiesUsage", default, skip_serializing_if = "Option::is_none")]
    pub properties_usage: Option<WarmStoragePropertiesUsage>,
}
impl WarmStorageEnvironmentStatus {
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "An object that contains the status of warm storage properties usage."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WarmStoragePropertiesUsage {
    #[doc = "This string represents the state of warm storage properties usage. It can be \"Ok\", \"Error\", \"Unknown\"."]
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<warm_storage_properties_usage::State>,
    #[doc = "An object that contains the details about warm storage properties usage state."]
    #[serde(rename = "stateDetails", default, skip_serializing_if = "Option::is_none")]
    pub state_details: Option<WarmStoragePropertiesUsageStateDetails>,
}
impl WarmStoragePropertiesUsage {
    pub fn new() -> Self {
        Self::default()
    }
}
pub mod warm_storage_properties_usage {
    use super::*;
    #[doc = "This string represents the state of warm storage properties usage. It can be \"Ok\", \"Error\", \"Unknown\"."]
    // Remote-derive pattern again. Note the distinction between `Unknown`
    // (one of the documented states) and `UnknownValue` (a string that
    // matches no known variant).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    #[serde(remote = "State")]
    pub enum State {
        Ok,
        Error,
        Unknown,
        #[serde(skip_deserializing)]
        UnknownValue(String),
    }
    impl FromStr for State {
        type Err = value::Error;
        fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
            Self::deserialize(s.into_deserializer())
        }
    }
    impl<'de> Deserialize<'de> for State {
        fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            let s = String::deserialize(deserializer)?;
            // Unknown strings are preserved instead of failing deserialization.
            let deserialized = Self::from_str(&s).unwrap_or(Self::UnknownValue(s));
            Ok(deserialized)
        }
    }
    impl Serialize for State {
        fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            match self {
                Self::Ok => serializer.serialize_unit_variant("State", 0u32, "Ok"),
                Self::Error => serializer.serialize_unit_variant("State", 1u32, "Error"),
                Self::Unknown => serializer.serialize_unit_variant("State", 2u32, "Unknown"),
                // Unrecognized values round-trip as plain strings.
                Self::UnknownValue(s) => serializer.serialize_str(s.as_str()),
            }
        }
    }
}
#[doc = "An object that contains the details about warm storage properties usage state."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WarmStoragePropertiesUsageStateDetails {
    #[doc = "A value that represents the number of properties used by the environment for S1/S2 SKU and number of properties used by Warm Store for PAYG SKU"]
    #[serde(rename = "currentCount", default, skip_serializing_if = "Option::is_none")]
    pub current_count: Option<i32>,
    #[doc = "A value that represents the maximum number of properties used allowed by the environment for S1/S2 SKU and maximum number of properties allowed by Warm Store for PAYG SKU."]
    #[serde(rename = "maxCount", default, skip_serializing_if = "Option::is_none")]
    pub max_count: Option<i32>,
}
impl WarmStoragePropertiesUsageStateDetails {
    pub fn new() -> Self {
        Self::default()
    }
}
#[doc = "The warm store configuration provides the details to create a warm store cache that will retain a copy of the environment's data available for faster query."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WarmStoreConfigurationProperties {
    #[doc = "ISO8601 timespan specifying the number of days the environment's events will be available for query from the warm store."]
    #[serde(rename = "dataRetention")]
    pub data_retention: String,
}
impl WarmStoreConfigurationProperties {
    // `data_retention` is required (no `default`/`skip_serializing_if`), so
    // it is a constructor argument.
    pub fn new(data_retention: String) -> Self {
        Self { data_retention }
    }
}
| 44.830734 | 412 | 0.691438 |
5b80e355a940c665de9565f35285e752303fc365 | 4,234 | use std::{
fmt,
mem::{replace, uninitialized, ManuallyDrop},
sync::atomic::{
AtomicBool,
Ordering::{self, *},
},
};
/// A shared removable value. You can only take values from this type (no
/// insertion allowed). No extra allocation is necessary. It may be useful for
/// things like shared `thread::JoinHandle`s.
use std::mem::MaybeUninit;

pub struct Removable<T> {
    /// Storage for the value. Initialized if and only if `present` is true.
    item: MaybeUninit<T>,
    /// Whether `item` currently holds an initialized value.
    present: AtomicBool,
}

impl<T> Removable<T> {
    /// Creates a removable item with the passed argument as a present value.
    pub fn new(val: T) -> Self {
        Self { item: MaybeUninit::new(val), present: AtomicBool::new(true) }
    }

    /// Creates a removable item with no present value.
    pub fn empty() -> Self {
        Self {
            // `MaybeUninit::uninit()` is safe to construct, unlike the
            // deprecated `mem::uninitialized()` used previously, which is
            // undefined behavior for most types. All reads of `item` are
            // gated on `present`.
            item: MaybeUninit::uninit(),
            present: AtomicBool::new(false),
        }
    }

    /// Replaces the stored value with a given one and returns the old value.
    /// Requires a mutable reference since the type of the value might not be
    /// atomic.
    pub fn replace(&mut self, val: Option<T>) -> Option<T> {
        let present = self.present.get_mut();
        match val {
            Some(val) => {
                if *present {
                    // SAFETY: `present` is true, so `item` is initialized.
                    Some(replace(unsafe { &mut *self.item.as_mut_ptr() }, val))
                } else {
                    *present = true;
                    // Overwriting an uninitialized `MaybeUninit` is safe; no
                    // destructor runs for the absent old contents.
                    self.item = MaybeUninit::new(val);
                    None
                }
            },
            None if *present => {
                *present = false;
                // SAFETY: `present` was true, so `item` is initialized, and
                // clearing the flag ensures the value is not read again.
                Some(unsafe { self.item.as_ptr().read() })
            },
            None => None,
        }
    }

    /// Tries to get a mutable reference to the stored value. If the value was
    /// not present, `None` is returned.
    pub fn get_mut(&mut self) -> Option<&mut T> {
        if *self.present.get_mut() {
            // SAFETY: `present` is true, so `item` is initialized.
            Some(unsafe { &mut *self.item.as_mut_ptr() })
        } else {
            None
        }
    }

    /// Tests if the stored value is present. Note that there are no guarantees
    /// that `take` will be successful if this method returns `true` because
    /// some other thread could take the value meanwhile.
    pub fn is_present(&self, ordering: Ordering) -> bool {
        self.present.load(ordering)
    }

    /// Tries to take the value. If no value was present in first place, `None`
    /// is returned. In terms of memory ordering, `AcqRel` should be enough.
    pub fn take(&self, ordering: Ordering) -> Option<T> {
        if self.present.swap(false, ordering) {
            // SAFETY: swapping `present` from true to false transfers
            // exclusive ownership of the initialized value to this caller;
            // no other thread can win the same swap, so the value is read
            // exactly once.
            Some(unsafe { self.item.as_ptr().read() })
        } else {
            None
        }
    }
}

impl<T> fmt::Debug for Removable<T> {
    fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
        // Only the presence flag is printed: `T` is not required to implement
        // `Debug`, and the value may be taken concurrently anyway. Output
        // matches the previous hand-rolled format in the non-alternate case.
        fmtr.debug_struct("Removable")
            .field("present", &self.is_present(Relaxed))
            .finish()
    }
}

impl<T> Default for Removable<T> {
    fn default() -> Self {
        Self::empty()
    }
}

impl<T> Drop for Removable<T> {
    fn drop(&mut self) {
        if *self.present.get_mut() {
            // SAFETY: `present` is true, so `item` holds an initialized value
            // that was never taken; drop it exactly once here.
            unsafe { std::ptr::drop_in_place(self.item.as_mut_ptr()) }
        }
    }
}

impl<T> From<Option<T>> for Removable<T> {
    fn from(opt: Option<T>) -> Self {
        match opt {
            Some(item) => Self::new(item),
            None => Self::empty(),
        }
    }
}

// SAFETY: `Removable<T>` owns a `T`, so moving it across threads is sound
// exactly when `T: Send`.
unsafe impl<T> Send for Removable<T> where T: Send {}
// SAFETY: shared access only allows `is_present` and `take`; `take` hands the
// value to exactly one caller, which amounts to sending a `T`.
unsafe impl<T> Sync for Removable<T> where T: Send {}
| 31.834586 | 80 | 0.546528 |
fcbdb2460df03643b9c33ecef0d6ebfd00bdd213 | 531 | cfg_if! {
    // Compile-time backend selection: exactly one branch is compiled, and
    // the first matching configuration wins.
    if #[cfg(all(target_arch = "wasm32", feature = "wbindgen"))] {
        mod wasm_bindgen;
        pub use self::wasm_bindgen::random::Random;
        pub use self::wasm_bindgen::instant::Instant;
    }
    // wasm32 with the miniquad backend (only reached when the `wbindgen`
    // branch above did not match).
    else if #[cfg(all(target_arch = "wasm32", feature = "mquad"))] {
        mod miniquad;
        pub use self::miniquad::random::Random;
        pub use self::miniquad::instant::Instant;
    }
    // Every other configuration uses the native backend.
    else {
        mod native;
        pub use native::random::Random;
        pub use native::instant::Instant;
    }
}
| 29.5 | 68 | 0.580038 |
ddc6d254987613c4a9c8b528bd6cf4f6e5e0de55 | 9,986 | #![deny(unused_imports, unused_must_use)]
//! # Crossterm
//!
//! Have you ever been disappointed when a terminal library for rust was only written for UNIX systems?
//! Crossterm provides clearing, event (input) handling, styling, cursor movement, and terminal actions for both
//! Windows and UNIX systems.
//!
//! Crossterm aims to be simple and easy to call in code. Through the simplicity of Crossterm, you do not
//! have to worry about the platform you are working with.
//!
//! This crate supports all UNIX and Windows terminals down to Windows 7 (not all terminals are tested
//! see [Tested Terminals](https://github.com/crossterm-rs/crossterm#tested-terminals)
//! for more info).
//!
//! ## Command API
//!
//! The command API makes the use of `crossterm` much easier and offers more control over when and how a
//! command is executed. A command is just an action you can perform on the terminal e.g. cursor movement.
//!
//! The command API offers:
//!
//! * Better Performance.
//! * Complete control over when to flush.
//! * Complete control over where the ANSI escape commands are executed to.
//! * Way easier and nicer API.
//!
//! There are two ways to use the API command:
//!
//! * Functions can execute commands on types that implement Write. Functions are easier to use and debug.
//! The disadvantage is that some boilerplate code is involved.
//! * Macros are generally seen as more difficult and aren't always well supported by editors but offer an API with less boilerplate code. If you are
//! not afraid of macros, they are the recommended option.
//!
//! Linux and Windows 10 systems support ANSI escape codes. Those ANSI escape codes are strings or rather a
//! byte sequence. When we `write` and `flush` those to the terminal we can perform some action.
//! For older windows systems a WinAPI call is made.
//!
//! ### Supported Commands
//!
//! - Module [`cursor`](cursor/index.html)
//! - Visibility - [`Show`](cursor/struct.Show.html), [`Hide`](cursor/struct.Hide.html)
//! - Appearance - [`EnableBlinking`](cursor/struct.EnableBlinking.html),
//! [`DisableBlinking`](cursor/struct.DisableBlinking.html)
//! - Position -
//! [`SavePosition`](cursor/struct.SavePosition.html), [`RestorePosition`](cursor/struct.RestorePosition.html),
//! [`MoveUp`](cursor/struct.MoveUp.html), [`MoveDown`](cursor/struct.MoveDown.html),
//! [`MoveLeft`](cursor/struct.MoveLeft.html), [`MoveRight`](cursor/struct.MoveRight.html),
//! [`MoveTo`](cursor/struct.MoveTo.html), [`MoveToColumn`](cursor/struct.MoveToColumn.html),[`MoveToRow`](cursor/struct.MoveToRow.html),
//! [`MoveToNextLine`](cursor/struct.MoveToNextLine.html), [`MoveToPreviousLine`](cursor/struct.MoveToPreviousLine.html),
//! - Shape -
//! [`SetCursorShape`](cursor/struct.SetCursorShape.html)
//! - Module [`event`](event/index.html)
//! - Mouse events - [`EnableMouseCapture`](event/struct.EnableMouseCapture.html),
//! [`DisableMouseCapture`](event/struct.DisableMouseCapture.html)
//! - Module [`style`](style/index.html)
//! - Colors - [`SetForegroundColor`](style/struct.SetForegroundColor.html),
//! [`SetBackgroundColor`](style/struct.SetBackgroundColor.html),
//! [`ResetColor`](style/struct.ResetColor.html), [`SetColors`](style/struct.SetColors.html)
//! - Attributes - [`SetAttribute`](style/struct.SetAttribute.html), [`SetAttributes`](style/struct.SetAttributes.html),
//! [`PrintStyledContent`](style/struct.PrintStyledContent.html)
//! - Module [`terminal`](terminal/index.html)
//! - Scrolling - [`ScrollUp`](terminal/struct.ScrollUp.html),
//! [`ScrollDown`](terminal/struct.ScrollDown.html)
//! - Miscellaneous - [`Clear`](terminal/struct.Clear.html),
//! [`SetSize`](terminal/struct.SetSize.html)
//! [`SetTitle`](terminal/struct.SetTitle.html)
//! [`DisableLineWrap`](terminal/struct.DisableLineWrap.html)
//! [`EnableLineWrap`](terminal/struct.EnableLineWrap.html)
//! - Alternate screen - [`EnterAlternateScreen`](terminal/struct.EnterAlternateScreen.html),
//! [`LeaveAlternateScreen`](terminal/struct.LeaveAlternateScreen.html)
//!
//! ### Command Execution
//!
//! There are two different ways to execute commands:
//!
//! * [Lazy Execution](#lazy-execution)
//! * [Direct Execution](#direct-execution)
//!
//! #### Lazy Execution
//!
//! Flushing bytes to the terminal buffer is a heavy system call. If we perform a lot of actions with the terminal,
//! we want to do this periodically - like with a TUI editor - so that we can flush more data to the terminal buffer
//! at the same time.
//!
//! Crossterm offers the possibility to do this with `queue`.
//! With `queue` you can queue commands, and when you call [Write::flush][flush] these commands will be executed.
//!
//! You can pass a custom buffer implementing [std::io::Write][write] to this `queue` operation.
//! The commands will be executed on that buffer.
//! The most common buffer is [std::io::stdout][stdout] however, [std::io::stderr][stderr] is used sometimes as well.
//!
//! ##### Examples
//!
//! A simple demonstration that shows the command API in action with cursor commands.
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{QueueableCommand, cursor};
//!
//! let mut stdout = stdout();
//! stdout.queue(cursor::MoveTo(5,5));
//!
//! // some other code ...
//!
//! stdout.flush();
//! ```
//!
//! The [queue](./trait.QueueableCommand.html) function returns itself, therefore you can use this to queue another
//! command. Like `stdout.queue(Goto(5,5)).queue(Clear(ClearType::All))`.
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{queue, QueueableCommand, cursor};
//!
//! let mut stdout = stdout();
//! queue!(stdout, cursor::MoveTo(5, 5));
//!
//! // some other code ...
//!
//! // move operation is performed only if we flush the buffer.
//! stdout.flush();
//! ```
//!
//! You can pass more than one command into the [queue](./macro.queue.html) macro like
//! `queue!(stdout, MoveTo(5, 5), Clear(ClearType::All))` and
//! they will be executed in the given order from left to right.
//!
//! #### Direct Execution
//!
//! For many applications it is not at all important to be efficient with 'flush' operations.
//! For this use case there is the `execute` operation.
//! This operation executes the command immediately, calling `flush` under the hood.
//!
//! You can pass a custom buffer implementing [std::io::Write][write] to this `execute` operation.
//! The commands will be executed on that buffer.
//! The most common buffer is [std::io::stdout][stdout] however, [std::io::stderr][stderr] is used sometimes as well.
//!
//! ##### Examples
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{ExecutableCommand, cursor};
//!
//! let mut stdout = stdout();
//! stdout.execute(cursor::MoveTo(5,5));
//! ```
//! The [execute](./trait.ExecutableCommand.html) function returns itself, therefore you can use this to queue
//! another command. Like `stdout.execute(Goto(5,5))?.execute(Clear(ClearType::All))`.
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{Write, stdout};
//! use crossterm::{execute, ExecutableCommand, cursor};
//!
//! let mut stdout = stdout();
//! execute!(stdout, cursor::MoveTo(5, 5));
//! ```
//! You can pass more than one command into the [execute](./macro.execute.html) macro like
//! `execute!(stdout, MoveTo(5, 5), Clear(ClearType::All))` and they will be executed in the given order from
//! left to right.
//!
//! ## Examples
//!
//! Print a rectangle colored with magenta and use both direct execution and lazy execution.
//!
//! Functions:
//!
//! ```no_run
//! use std::io::{stdout, Write};
//! use crossterm::{
//! ExecutableCommand, QueueableCommand,
//! terminal, cursor, style::{self, Stylize}, Result
//! };
//!
//! fn main() -> Result<()> {
//! let mut stdout = stdout();
//!
//! stdout.execute(terminal::Clear(terminal::ClearType::All))?;
//!
//! for y in 0..40 {
//! for x in 0..150 {
//! if (y == 0 || y == 40 - 1) || (x == 0 || x == 150 - 1) {
//! // in this loop we are more efficient by not flushing the buffer.
//! stdout
//! .queue(cursor::MoveTo(x,y))?
//! .queue(style::PrintStyledContent( "█".magenta()))?;
//! }
//! }
//! }
//! stdout.flush()?;
//! Ok(())
//! }
//! ```
//!
//! Macros:
//!
//! ```no_run
//! use std::io::{stdout, Write};
//! use crossterm::{
//! execute, queue,
//! style::{self, Stylize}, cursor, terminal, Result
//! };
//!
//! fn main() -> Result<()> {
//! let mut stdout = stdout();
//!
//! execute!(stdout, terminal::Clear(terminal::ClearType::All))?;
//!
//! for y in 0..40 {
//! for x in 0..150 {
//! if (y == 0 || y == 40 - 1) || (x == 0 || x == 150 - 1) {
//! // in this loop we are more efficient by not flushing the buffer.
//! queue!(stdout, cursor::MoveTo(x,y), style::PrintStyledContent( "█".magenta()))?;
//! }
//! }
//! }
//! stdout.flush()?;
//! Ok(())
//! }
//!```
//!
//! [write]: https://doc.rust-lang.org/std/io/trait.Write.html
//! [stdout]: https://doc.rust-lang.org/std/io/fn.stdout.html
//! [stderr]: https://doc.rust-lang.org/std/io/fn.stderr.html
//! [flush]: https://doc.rust-lang.org/std/io/trait.Write.html#tymethod.flush
// Crate-root re-exports: the command traits and the error types that nearly
// every user of the crate needs.
pub use crate::{
    command::{Command, ExecutableCommand, QueueableCommand},
    error::{ErrorKind, Result},
};
/// A module to work with the terminal cursor
pub mod cursor;
/// A module to read events.
pub mod event;
/// A module to apply attributes and colors on your text.
pub mod style;
/// A module to work with the terminal.
pub mod terminal;
/// A module to query if the current instance is a tty.
pub mod tty;
#[cfg(windows)]
/// A module that exposes one function to check if the current terminal supports ansi sequences.
pub mod ansi_support;
// Private module backing the `Command`/`ExecutableCommand`/`QueueableCommand`
// traits re-exported above.
mod command;
// Private module backing the `ErrorKind`/`Result` types re-exported above.
mod error;
// Crate-internal macro definitions.
pub(crate) mod macros;
| 38.705426 | 149 | 0.658722 |
8a67ee7e7c0d39c6d7cc8f766300b6f6526c6b73 | 7,644 | mod subtype;
mod ty;
pub use self::{subtype::get_subtype, ty::get_type};
use std::{io, mem};
use bytes::Buf;
use noodles_sam::record::data::field::{
value::{Subtype, Type},
Value,
};
/// Reads a BAM record data field value.
///
/// The stream is expected to be positioned at the start of the value, i.e.,
/// just after the tag and data type.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use noodles_bam::reader::record::data::field::get_value;
/// use noodles_sam::record::data::field::{value::Type, Value};
///
/// let data = [0x01, 0x00, 0x00, 0x00];
/// let mut reader = &data[..];
///
/// assert_eq!(get_value(&mut reader, Type::Int32)?, Value::Int32(1));
/// # Ok::<(), io::Error>(())
/// ```
pub fn get_value<B>(src: &mut B, ty: Type) -> io::Result<Value>
where
    B: Buf,
{
    // Fan out to the per-type readers.
    match ty {
        Type::Char => get_char_value(src),
        Type::Int8 => get_i8_value(src),
        Type::UInt8 => get_u8_value(src),
        Type::Int16 => get_i16_value(src),
        Type::UInt16 => get_u16_value(src),
        Type::Int32 => get_i32_value(src),
        Type::UInt32 => get_u32_value(src),
        Type::Float => get_f32_value(src),
        Type::Hex => Ok(Value::Hex(get_string(src)?)),
        Type::String => Ok(Value::String(get_string(src)?)),
        Type::Array => get_array_value(src),
    }
}
/// Returns an error unless at least `n` more bytes remain in `src`.
///
/// Factored out of the scalar readers below, which previously repeated the
/// same length check inline.
fn ensure_remaining<B>(src: &B, n: usize) -> io::Result<()>
where
    B: Buf,
{
    if src.remaining() < n {
        // `Buf::get_*` panics on underflow, so truncated input must be
        // rejected up front and surfaced as an I/O error instead.
        Err(io::Error::from(io::ErrorKind::UnexpectedEof))
    } else {
        Ok(())
    }
}

/// Reads a single byte as a `Char` value.
fn get_char_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<u8>())?;
    Ok(Value::Char(char::from(src.get_u8())))
}

/// Reads a signed 8-bit integer value.
fn get_i8_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<i8>())?;
    Ok(Value::Int8(src.get_i8()))
}

/// Reads an unsigned 8-bit integer value.
fn get_u8_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<u8>())?;
    Ok(Value::UInt8(src.get_u8()))
}

/// Reads a little-endian signed 16-bit integer value.
fn get_i16_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<i16>())?;
    Ok(Value::Int16(src.get_i16_le()))
}

/// Reads a little-endian unsigned 16-bit integer value.
fn get_u16_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<u16>())?;
    Ok(Value::UInt16(src.get_u16_le()))
}

/// Reads a little-endian signed 32-bit integer value.
fn get_i32_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<i32>())?;
    Ok(Value::Int32(src.get_i32_le()))
}

/// Reads a little-endian unsigned 32-bit integer value.
fn get_u32_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<u32>())?;
    Ok(Value::UInt32(src.get_u32_le()))
}

/// Reads a little-endian 32-bit float value.
fn get_f32_value<B>(src: &mut B) -> io::Result<Value>
where
    B: Buf,
{
    ensure_remaining(src, mem::size_of::<f32>())?;
    Ok(Value::Float(src.get_f32_le()))
}
/// Reads a NUL-terminated string, consuming the terminator.
///
/// This walks the source byte by byte rather than searching only
/// `src.chunk()`: for a non-contiguous `Buf`, the first chunk is not
/// guaranteed to contain the NUL terminator, and the previous
/// chunk-only search would incorrectly report it as missing.
///
/// On error, the bytes inspected so far have been consumed; callers are
/// expected to abort reading the record at that point.
fn get_string<B>(src: &mut B) -> io::Result<String>
where
    B: Buf,
{
    const NUL: u8 = 0x00;

    let mut buf = Vec::new();

    loop {
        if !src.has_remaining() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "string value missing NUL terminator",
            ));
        }

        match src.get_u8() {
            NUL => break, // Terminator found; it is not part of the string.
            b => buf.push(b),
        }
    }

    String::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}
fn get_array_value<B>(src: &mut B) -> io::Result<Value>
where
B: Buf,
{
let subtype = get_subtype(src)?;
let len = usize::try_from(src.get_i32_le())
.map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
match subtype {
Subtype::Int8 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_i8());
}
Ok(Value::Int8Array(buf))
}
Subtype::UInt8 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_u8());
}
Ok(Value::UInt8Array(buf))
}
Subtype::Int16 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_i16_le());
}
Ok(Value::Int16Array(buf))
}
Subtype::UInt16 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_u16_le());
}
Ok(Value::UInt16Array(buf))
}
Subtype::Int32 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_i32_le());
}
Ok(Value::Int32Array(buf))
}
Subtype::UInt32 => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_u32_le());
}
Ok(Value::UInt32Array(buf))
}
Subtype::Float => {
let mut buf = Vec::with_capacity(len);
for _ in 0..len {
buf.push(src.get_f32_le());
}
Ok(Value::FloatArray(buf))
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_get_value() -> io::Result<()> {
        // Decodes `data` as a value of type `ty` and checks the result
        // against `expected`.
        fn t(mut data: &[u8], ty: Type, expected: Value) -> io::Result<()> {
            let actual = get_value(&mut data, ty)?;
            assert_eq!(actual, expected);
            Ok(())
        }
        t(&[b'n'], Type::Char, Value::Char('n'))?;
        t(&[0x00], Type::Int8, Value::Int8(0))?;
        t(&[0x00], Type::UInt8, Value::UInt8(0))?;
        t(&[0x00, 0x00], Type::Int16, Value::Int16(0))?;
        t(&[0x00, 0x00], Type::UInt16, Value::UInt16(0))?;
        t(&[0x00, 0x00, 0x00, 0x00], Type::Int32, Value::Int32(0))?;
        t(&[0x00, 0x00, 0x00, 0x00], Type::UInt32, Value::UInt32(0))?;
        t(&[0x00, 0x00, 0x00, 0x00], Type::Float, Value::Float(0.0))?;
        t(
            &[b'n', b'd', b'l', b's', 0x00],
            Type::String,
            Value::String(String::from("ndls")),
        )?;
        t(
            &[b'C', b'A', b'F', b'E', 0x00],
            Type::Hex,
            Value::Hex(String::from("CAFE")),
        )?;
        // NOTE(review): the array-value cases below are commented out; it is
        // unclear from this file whether they predate an API change. Verify
        // them against `get_array_value` and either re-enable or delete them.
        /*t(
            &[b'c', 0x01, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::Int8Array(vec![0]),
        )?;
        t(
            &[b'C', 0x01, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::UInt8Array(vec![0]),
        )?;
        t(
            &[b's', 0x01, 0x00, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::Int16Array(vec![0]),
        )?;
        t(
            &[b'S', 0x01, 0x00, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::UInt16Array(vec![0]),
        )?;
        t(
            &[b'i', 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::Int32Array(vec![0]),
        )?;
        t(
            &[b'I', 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::UInt32Array(vec![0]),
        )?;
        t(
            &[b'f', 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
            Type::Array,
            Value::FloatArray(vec![0.0]),
        )?; */
        Ok(())
    }
}
| 25.144737 | 94 | 0.502616 |
14c631831feb3528687dc708a76198af5d49b48e | 2,500 | //! Types and utilities for working with `BufStream`.
mod chain;
mod collect;
mod from;
mod iter;
mod limit;
mod stream;
pub use self::chain::Chain;
pub use self::collect::Collect;
pub use self::from::FromBufStream;
pub use self::iter::iter;
pub use self::limit::Limit;
pub use self::stream::{stream, IntoStream};
pub mod error {
    //! Error types
    //!
    //! Re-exports the error types of the adapters in this module so they can
    //! all be imported from one place.
    pub use super::collect::CollectError;
    pub use super::from::{CollectBytesError, CollectVecError};
    pub use super::limit::LimitError;
}
use BufStream;
// Blanket implementation: every type implementing `BufStream` automatically
// gains the extension methods below.
impl<T> BufStreamExt for T where T: BufStream {}
/// An extension trait for `BufStream`'s that provides a variety of convenient
/// adapters.
pub trait BufStreamExt: BufStream {
    /// Takes two buf streams and creates a new buf stream over both in
    /// sequence.
    ///
    /// `chain()` returns a new `BufStream` value which will first yield all
    /// data from `self` then all data from `other`.
    ///
    /// In other words, it links two buf streams together, in a chain.
    ///
    /// Both streams must share the same error type (see the `T` bound).
    fn chain<T>(self, other: T) -> Chain<Self, T>
    where
        Self: Sized,
        T: BufStream<Error = Self::Error>,
    {
        Chain::new(self, other)
    }
    /// Consumes all data from `self`, storing it in byte storage of type `T`.
    ///
    /// `collect()` returns a future that buffers all data yielded from `self`
    /// into storage of type `T`. The future completes once `self` yields
    /// `None`, returning the buffered data.
    ///
    /// The collect future will yield an error if `self` yields an error or if
    /// the collect operation errors. The collect error cases are dependent on
    /// the target storage type.
    fn collect<T>(self) -> Collect<Self, T>
    where
        Self: Sized,
        T: FromBufStream<Self::Item>,
    {
        Collect::new(self)
    }
    /// Limit the number of bytes that the stream can yield.
    ///
    /// `limit()` returns a new `BufStream` value which yields all the data from
    /// `self` while ensuring that at most `amount` bytes are yielded.
    ///
    /// If `self` can yield greater than `amount` bytes, the returned stream
    /// will yield an error.
    fn limit(self, amount: u64) -> Limit<Self>
    where
        Self: Sized,
    {
        Limit::new(self, amount)
    }
    /// Creates a `Stream` from a `BufStream`.
    ///
    /// This produces a `Stream` of `BufStream::Items`.
    fn into_stream(self) -> IntoStream<Self>
    where
        Self: Sized,
    {
        IntoStream::new(self)
    }
}
| 28.409091 | 80 | 0.6276 |
bb769790c103c4a0cb5e0c3437d2bd2bcbc817b8 | 2,738 | //!Surface area heuristic
use super::aabb::*;
/// Implemented by anything that can report the surface area of its geometry.
pub trait HasSurfaceArea {
    fn surface_area(&self) -> f32;
}

/// Cost constants used when scoring a candidate split with the surface area
/// heuristic.
pub struct SAHConstants {
    pub cost_traversal: f32,
    pub cost_triangle_intersection: f32
}

/// Sums the surface areas of all objects in the slice.
fn compute_surface_area<T: HasSurfaceArea>(objects: &[T]) -> f32 {
    let mut total = 0.0;
    for object in objects {
        total += object.surface_area();
    }
    total
}

/// Scores a candidate left/right partition: one traversal step plus the
/// expected number of triangle intersections, where each side is weighted by
/// its share of the total surface area.
fn surface_area_heuristic<T: HasSurfaceArea>(
    left_objects: &[T],
    right_objects: &[T],
    sah_constants: &SAHConstants
) -> f32 {
    let left_area = compute_surface_area(left_objects);
    let right_area = compute_surface_area(right_objects);
    let total_area = left_area + right_area;

    let expected_left = left_area / total_area * left_objects.len() as f32;
    let expected_right = right_area / total_area * right_objects.len() as f32;

    sah_constants.cost_traversal
        + sah_constants.cost_triangle_intersection * (expected_left + expected_right)
}
/// Strategy for choosing where to split a sorted run of objects while
/// building a BVH node.
pub trait BVHSplitter {
    /// Computes an index where the bvh should be split: the left child gets
    /// `sorted_objects[..index]` and the right child `sorted_objects[index..]`.
    ///
    /// Returns 0 if there should be no split (the node becomes a leaf).
    fn get_spliting_index<T>(&self, sorted_objects: &[T]) -> usize
        where T: HasAABoundingBox + HasSurfaceArea;
}
/// Splitter that always cuts at the median element, stopping (returning 0)
/// once a slice is small enough to become a leaf.
pub struct MedianIndexSplitter {
    // Largest slice size that is kept as a single leaf instead of split.
    pub num_objects_in_leaf: usize
}
impl BVHSplitter for MedianIndexSplitter {
    /// Splits at the midpoint, or not at all for leaf-sized slices.
    fn get_spliting_index<T>(&self, sorted_objects: &[T]) -> usize
        where T: HasAABoundingBox + HasSurfaceArea
    {
        let count = sorted_objects.len();
        match count <= self.num_objects_in_leaf {
            true => 0,
            false => count / 2,
        }
    }
}
/// Splitter that evaluates the surface area heuristic at evenly spaced
/// candidate indices and keeps the cheapest one.
pub struct SAHSubdivideGuessSplitter {
    // Number of evenly sized chunks used to generate candidate split points.
    pub number_of_subdivs: u32,
    // Cost constants passed through to `surface_area_heuristic`.
    pub sah_consts: SAHConstants
}
impl BVHSplitter for SAHSubdivideGuessSplitter {
    /// Tries candidate split points spaced `len / number_of_subdivs` apart
    /// (at least 1 apart) and returns the index with the lowest SAH cost.
    ///
    /// Returns 0 (no split) when the slice holds at most one object, or when
    /// no candidate split beats the cost of intersecting the unsplit node.
    // NOTE: the redundant inline `T: HasAABoundingBox` bound was dropped;
    // the where clause below already carries it, matching the trait
    // declaration and the `MedianIndexSplitter` impl.
    fn get_spliting_index<T>(&self, sorted_objects: &[T]) -> usize
        where T: HasAABoundingBox + HasSurfaceArea
    {
        if sorted_objects.len() <= 1 {return 0;}
        //the last subdivision may or may not have this size
        let subdivision_size = (sorted_objects.len() as u32 / self.number_of_subdivs).max(1);
        let mut left_size = subdivision_size;
        let mut best_mid_point = 0u32;
        // Baseline: cost of leaving the node unsplit and testing every object.
        let mut best_cost =
            sorted_objects.len() as f32 * self.sah_consts.cost_triangle_intersection;
        while left_size < sorted_objects.len() as u32 {
            let (left_objects, right_objects) = sorted_objects.split_at(left_size as usize);
            let cost = surface_area_heuristic(left_objects, right_objects, &self.sah_consts);
            if cost < best_cost {
                best_cost = cost;
                best_mid_point = left_size;
            }
            left_size += subdivision_size;
        }
        best_mid_point as usize
    }
}
| 32.211765 | 93 | 0.667275 |
28996b40cfdfec0f2fa7d76df348e78311f00525 | 22,072 | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Type-checking for the rust-intrinsic and platform-intrinsic
//! intrinsics that the compiler exposes.
use intrinsics;
use rustc::traits::{ObligationCause, ObligationCauseCode};
use rustc::ty::subst::Substs;
use rustc::ty::{self, TyCtxt, Ty};
use rustc::util::nodemap::FxHashMap;
use require_same_types;
use syntax::abi::Abi;
use syntax::ast;
use syntax::symbol::Symbol;
use syntax_pos::Span;
use rustc::hir;
use std::iter;
/// Builds the `fn` type an intrinsic is *expected* to have from the given
/// `inputs`/`output`, and checks the foreign item against it.
///
/// The number of declared type parameters is validated first (E0094); only
/// when it matches are the two signatures required to unify.
fn equate_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                   it: &hir::ForeignItem,
                                   n_tps: usize,
                                   abi: Abi,
                                   inputs: Vec<Ty<'tcx>>,
                                   output: Ty<'tcx>) {
    let def_id = tcx.hir.local_def_id(it.id);
    // Substitute `ReErased` for every region and each type parameter for
    // itself: intrinsic signatures are compared modulo lifetimes.
    let substs = Substs::for_item(tcx, def_id,
                                  |_, _| tcx.mk_region(ty::ReErased),
                                  |def, _| tcx.mk_param_from_def(def))
    // Every intrinsic is a non-variadic `unsafe fn` with the given ABI.
    let fty = tcx.mk_fn_def(def_id, substs, ty::Binder(tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        abi
    )));
    let i_n_tps = tcx.item_generics(def_id).types.len();
    if i_n_tps != n_tps {
        // Point the diagnostic at the generics list when the item has one.
        let span = match it.node {
            hir::ForeignItemFn(_, _, ref generics) => generics.span,
            hir::ForeignItemStatic(..) => it.span
        };
        struct_span_err!(tcx.sess, span, E0094,
                         "intrinsic has wrong number of type \
                          parameters: found {}, expected {}",
                         i_n_tps, n_tps)
            .span_label(span, &format!("expected {} type parameter", n_tps))
            .emit();
    } else {
        require_same_types(tcx,
                           &ObligationCause::new(it.span,
                                                 it.id,
                                                 ObligationCauseCode::IntrinsicType),
                           tcx.item_type(def_id),
                           fty);
    }
}
/// Remember to add all intrinsics here, in librustc_trans/trans/intrinsic.rs,
/// and in libcore/intrinsics.rs
///
/// Computes the expected `(n_tps, inputs, output)` signature for the named
/// rust-intrinsic and hands it to `equate_intrinsic_type`. Unknown atomic
/// operations are reported as E0092, other unknown names as E0093.
pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                      it: &hir::ForeignItem) {
    // Shorthand for the intrinsic's n-th type parameter, displayed as "P{n}".
    let param = |n| tcx.mk_param(n, Symbol::intern(&format!("P{}", n)));
    let name = it.name.as_str();
    // "atomic_*" intrinsics are dispatched on the operation segment of the
    // name only; any ordering suffix (e.g. "_acq") is ignored here.
    let (n_tps, inputs, output) = if name.starts_with("atomic_") {
        let split : Vec<&str> = name.split('_').collect();
        assert!(split.len() >= 2, "Atomic intrinsic not correct format");
        //We only care about the operation here
        let (n_tps, inputs, output) = match split[1] {
            "cxchg" | "cxchgweak" => (1, vec![tcx.mk_mut_ptr(param(0)),
                                              param(0),
                                              param(0)],
                        tcx.intern_tup(&[param(0), tcx.types.bool], false)),
            "load" => (1, vec![tcx.mk_imm_ptr(param(0))],
                       param(0)),
            "store" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)],
                        tcx.mk_nil()),
            "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" |
            "min" | "umax" | "umin" => {
                (1, vec![tcx.mk_mut_ptr(param(0)), param(0)],
                 param(0))
            }
            "fence" | "singlethreadfence" => {
                (0, Vec::new(), tcx.mk_nil())
            }
            op => {
                struct_span_err!(tcx.sess, it.span, E0092,
                                 "unrecognized atomic operation function: `{}`", op)
                    .span_label(it.span, &format!("unrecognized atomic operation"))
                    .emit();
                return;
            }
        };
        (n_tps, inputs, output)
    } else if &name[..] == "abort" || &name[..] == "unreachable" {
        // Diverging intrinsics: no arguments, never return.
        (0, Vec::new(), tcx.types.never)
    } else {
        // All remaining rust-intrinsics, keyed by exact name.
        let (n_tps, inputs, output) = match &name[..] {
            "breakpoint" => (0, Vec::new(), tcx.mk_nil()),
            "size_of" |
            "pref_align_of" | "min_align_of" => (1, Vec::new(), tcx.types.usize),
            "size_of_val" |  "min_align_of_val" => {
                (1, vec![
                    tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
                                                                 ty::BrAnon(0))),
                                   param(0))
                 ], tcx.types.usize)
            }
            "rustc_peek" => (1, vec![param(0)], param(0)),
            "init" => (1, Vec::new(), param(0)),
            "uninit" => (1, Vec::new(), param(0)),
            "forget" => (1, vec![ param(0) ], tcx.mk_nil()),
            "transmute" => (2, vec![ param(0) ], param(1)),
            "move_val_init" => {
                (1,
                 vec![
                    tcx.mk_mut_ptr(param(0)),
                    param(0)
                 ],
                 tcx.mk_nil())
            }
            "drop_in_place" => {
                (1, vec![tcx.mk_mut_ptr(param(0))], tcx.mk_nil())
            }
            "needs_drop" => (1, Vec::new(), tcx.types.bool),
            "type_name" => (1, Vec::new(), tcx.mk_static_str()),
            "type_id" => (1, Vec::new(), tcx.types.u64),
            "offset" | "arith_offset" => {
              (1,
               vec![
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutImmutable
                  }),
                  tcx.types.isize
               ],
               tcx.mk_ptr(ty::TypeAndMut {
                   ty: param(0),
                   mutbl: hir::MutImmutable
               }))
            }
            "copy" | "copy_nonoverlapping" => {
              (1,
               vec![
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutImmutable
                  }),
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutMutable
                  }),
                  tcx.types.usize,
               ],
               tcx.mk_nil())
            }
            "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
              (1,
               vec![
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutMutable
                  }),
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutImmutable
                  }),
                  tcx.types.usize,
               ],
               tcx.mk_nil())
            }
            "write_bytes" | "volatile_set_memory" => {
              (1,
               vec![
                  tcx.mk_ptr(ty::TypeAndMut {
                      ty: param(0),
                      mutbl: hir::MutMutable
                  }),
                  tcx.types.u8,
                  tcx.types.usize,
               ],
               tcx.mk_nil())
            }
            "sqrtf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "sqrtf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "powif32" => {
               (0,
                vec![ tcx.types.f32, tcx.types.i32 ],
                tcx.types.f32)
            }
            "powif64" => {
               (0,
                vec![ tcx.types.f64, tcx.types.i32 ],
                tcx.types.f64)
            }
            "sinf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "sinf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "cosf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "cosf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "powf32" => {
               (0,
                vec![ tcx.types.f32, tcx.types.f32 ],
                tcx.types.f32)
            }
            "powf64" => {
               (0,
                vec![ tcx.types.f64, tcx.types.f64 ],
                tcx.types.f64)
            }
            "expf32"   => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "expf64"   => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "exp2f32"  => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "exp2f64"  => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "logf32"   => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "logf64"   => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "log10f32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "log10f64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "log2f32"  => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "log2f64"  => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "fmaf32" => {
                (0,
                 vec![ tcx.types.f32, tcx.types.f32, tcx.types.f32 ],
                 tcx.types.f32)
            }
            "fmaf64" => {
                (0,
                 vec![ tcx.types.f64, tcx.types.f64, tcx.types.f64 ],
                 tcx.types.f64)
            }
            "fabsf32"      => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "fabsf64"      => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "copysignf32"  => (0, vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32),
            "copysignf64"  => (0, vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64),
            "floorf32"     => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "floorf64"     => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "ceilf32"      => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "ceilf64"      => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "truncf32"     => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "truncf64"     => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "rintf32"      => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "rintf64"      => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "nearbyintf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "nearbyintf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "roundf32"     => (0, vec![ tcx.types.f32 ], tcx.types.f32),
            "roundf64"     => (0, vec![ tcx.types.f64 ], tcx.types.f64),
            "volatile_load" =>
                (1, vec![ tcx.mk_imm_ptr(param(0)) ], param(0)),
            "volatile_store" =>
                (1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_nil()),
            "ctpop" | "ctlz" | "cttz" | "bswap" => (1, vec![param(0)], param(0)),
            "add_with_overflow" | "sub_with_overflow"  | "mul_with_overflow" =>
                (1, vec![param(0), param(0)],
                tcx.intern_tup(&[param(0), tcx.types.bool], false)),
            "unchecked_div" | "unchecked_rem" =>
                (1, vec![param(0), param(0)], param(0)),
            "overflowing_add" | "overflowing_sub" | "overflowing_mul" =>
                (1, vec![param(0), param(0)], param(0)),
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" =>
                (1, vec![param(0), param(0)], param(0)),
            "assume" => (0, vec![tcx.types.bool], tcx.mk_nil()),
            "likely" => (0, vec![tcx.types.bool], tcx.types.bool),
            "unlikely" => (0, vec![tcx.types.bool], tcx.types.bool),
            "discriminant_value" => (1, vec![
                    tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
                                                                 ty::BrAnon(0))),
                                   param(0))], tcx.types.u64),
            // `try` takes a function pointer plus two data pointers and
            // returns an i32 status.
            "try" => {
                let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
                let fn_ty = ty::Binder(tcx.mk_fn_sig(
                    iter::once(mut_u8),
                    tcx.mk_nil(),
                    false,
                    hir::Unsafety::Normal,
                    Abi::Rust,
                ));
                (0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32)
            }
            ref other => {
                struct_span_err!(tcx.sess, it.span, E0093,
                                 "unrecognized intrinsic function: `{}`",
                                 *other)
                                 .span_label(it.span, &format!("unrecognized intrinsic"))
                                 .emit();
                return;
            }
        };
        (n_tps, inputs, output)
    };
    equate_intrinsic_type(tcx, it, n_tps, Abi::RustIntrinsic, inputs, output)
}
/// Type-check `extern "platform-intrinsic" { ... }` functions.
///
/// Generic simd_* intrinsics are checked by shape here; all other names are
/// looked up in the `intrinsics` table and matched structurally against the
/// item's declared signature by `match_intrinsic_type_to_type`.
pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                               it: &hir::ForeignItem) {
    // Shorthand for the intrinsic's n-th type parameter, displayed as "P{n}".
    let param = |n| {
        let name = Symbol::intern(&format!("P{}", n));
        tcx.mk_param(n, name)
    };
    let def_id = tcx.hir.local_def_id(it.id);
    let i_n_tps = tcx.item_generics(def_id).types.len();
    let name = it.name.as_str();
    let (n_tps, inputs, output) = match &*name {
        "simd_eq" | "simd_ne" | "simd_lt" | "simd_le" | "simd_gt" | "simd_ge" => {
            (2, vec![param(0), param(0)], param(1))
        }
        "simd_add" | "simd_sub" | "simd_mul" |
        "simd_div" | "simd_shl" | "simd_shr" |
        "simd_and" | "simd_or" | "simd_xor" => {
            (1, vec![param(0), param(0)], param(0))
        }
        "simd_insert" => (2, vec![param(0), tcx.types.u32, param(1)], param(0)),
        "simd_extract" => (2, vec![param(0), tcx.types.u32], param(1)),
        "simd_cast" => (2, vec![param(0)], param(1)),
        // "simd_shuffleN" encodes the shuffle-index array length N in its name.
        name if name.starts_with("simd_shuffle") => {
            match name["simd_shuffle".len()..].parse() {
                Ok(n) => {
                    let params = vec![param(0), param(0),
                                      tcx.mk_ty(ty::TyArray(tcx.types.u32, n))];
                    (2, params, param(1))
                }
                Err(_) => {
                    span_err!(tcx.sess, it.span, E0439,
                              "invalid `simd_shuffle`, needs length: `{}`", name);
                    return
                }
            }
        }
        _ => {
            match intrinsics::Intrinsic::find(&name) {
                Some(intr) => {
                    // this function is a platform specific intrinsic
                    if i_n_tps != 0 {
                        span_err!(tcx.sess, it.span, E0440,
                                  "platform-specific intrinsic has wrong number of type \
                                   parameters: found {}, expected 0",
                                  i_n_tps);
                        return
                    }
                    // Maps each structural vector type to the first nominal
                    // simd type it matched, so the pairing stays consistent
                    // across the whole signature (see E0443).
                    let mut structural_to_nomimal = FxHashMap();
                    let sig = tcx.item_type(def_id).fn_sig();
                    let sig = tcx.no_late_bound_regions(&sig).unwrap();
                    if intr.inputs.len() != sig.inputs().len() {
                        span_err!(tcx.sess, it.span, E0444,
                                  "platform-specific intrinsic has invalid number of \
                                   arguments: found {}, expected {}",
                                  sig.inputs().len(), intr.inputs.len());
                        return
                    }
                    let input_pairs = intr.inputs.iter().zip(sig.inputs());
                    for (i, (expected_arg, arg)) in input_pairs.enumerate() {
                        match_intrinsic_type_to_type(tcx, &format!("argument {}", i + 1), it.span,
                                                     &mut structural_to_nomimal, expected_arg, arg);
                    }
                    match_intrinsic_type_to_type(tcx, "return value", it.span,
                                                 &mut structural_to_nomimal,
                                                 &intr.output, sig.output());
                    return
                }
                None => {
                    span_err!(tcx.sess, it.span, E0441,
                              "unrecognized platform-specific intrinsic function: `{}`", name);
                    return;
                }
            }
        }
    };
    equate_intrinsic_type(tcx, it, n_tps, Abi::PlatformIntrinsic,
                          inputs, output)
}
// walk the expected type and the actual type in lock step, checking they're
// the same, in a kinda-structural way, i.e. `Vector`s have to be simd structs with
// exactly the right element type
//
// `position` names the slot being checked ("argument N" or "return value")
// for diagnostics. `structural_to_nominal` records which nominal simd type
// each structural `Vector` description has already matched, so a single
// structural type cannot map to two different nominal types within one
// signature (reported as E0443).
fn match_intrinsic_type_to_type<'a, 'tcx>(
        tcx: TyCtxt<'a, 'tcx, 'tcx>,
        position: &str,
        span: Span,
        structural_to_nominal: &mut FxHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>,
        expected: &'a intrinsics::Type, t: ty::Ty<'tcx>)
{
    use intrinsics::Type::*;
    // Common E0442 "found X, expected Y" reporter.
    let simple_error = |real: &str, expected: &str| {
        span_err!(tcx.sess, span, E0442,
                  "intrinsic {} has wrong type: found {}, expected {}",
                  position, real, expected)
    };
    match *expected {
        // Void must be the unit type `()`.
        Void => match t.sty {
            ty::TyTuple(ref v, _) if v.is_empty() => {},
            _ => simple_error(&format!("`{}`", t), "()"),
        },
        // (The width we pass to LLVM doesn't concern the type checker.)
        Integer(signed, bits, _llvm_width) => match (signed, bits, &t.sty) {
            (true, 8, &ty::TyInt(ast::IntTy::I8)) |
            (false, 8, &ty::TyUint(ast::UintTy::U8)) |
            (true, 16, &ty::TyInt(ast::IntTy::I16)) |
            (false, 16, &ty::TyUint(ast::UintTy::U16)) |
            (true, 32, &ty::TyInt(ast::IntTy::I32)) |
            (false, 32, &ty::TyUint(ast::UintTy::U32)) |
            (true, 64, &ty::TyInt(ast::IntTy::I64)) |
            (false, 64, &ty::TyUint(ast::UintTy::U64)) |
            (true, 128, &ty::TyInt(ast::IntTy::I128)) |
            (false, 128, &ty::TyUint(ast::UintTy::U128)) => {},
            _ => simple_error(&format!("`{}`", t),
                              &format!("`{}{n}`",
                                       if signed {"i"} else {"u"},
                                       n = bits)),
        },
        Float(bits) => match (bits, &t.sty) {
            (32, &ty::TyFloat(ast::FloatTy::F32)) |
            (64, &ty::TyFloat(ast::FloatTy::F64)) => {},
            _ => simple_error(&format!("`{}`", t),
                              &format!("`f{n}`", n = bits)),
        },
        // Raw pointers must match in mutability, then the pointee is checked
        // recursively.
        Pointer(ref inner_expected, ref _llvm_type, const_) => {
            match t.sty {
                ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) => {
                    if (mutbl == hir::MutImmutable) != const_ {
                        simple_error(&format!("`{}`", t),
                                     if const_ {"const pointer"} else {"mut pointer"})
                    }
                    match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal,
                                                 inner_expected, ty)
                }
                _ => simple_error(&format!("`{}`", t), "raw pointer"),
            }
        }
        // Vectors must be simd types of the right length whose element type
        // recursively matches; see the consistency note on the map above.
        Vector(ref inner_expected, ref _llvm_type, len) => {
            if !t.is_simd() {
                simple_error(&format!("non-simd type `{}`", t), "simd type");
                return;
            }
            let t_len = t.simd_size(tcx);
            if len as usize != t_len {
                simple_error(&format!("vector with length {}", t_len),
                             &format!("length {}", len));
                return;
            }
            let t_ty = t.simd_type(tcx);
            {
                // check that a given structural type always has the same an intrinsic definition
                let previous = structural_to_nominal.entry(expected).or_insert(t);
                if *previous != t {
                    // this gets its own error code because it is non-trivial
                    span_err!(tcx.sess, span, E0443,
                              "intrinsic {} has wrong type: found `{}`, expected `{}` which \
                               was used for this vector type previously in this signature",
                              position,
                              t,
                              *previous);
                    return;
                }
            }
            match_intrinsic_type_to_type(tcx,
                                         position,
                                         span,
                                         structural_to_nominal,
                                         inner_expected,
                                         t_ty)
        }
        // Aggregates are matched element-wise against a tuple type.
        Aggregate(_flatten, ref expected_contents) => {
            match t.sty {
                ty::TyTuple(contents, _) => {
                    if contents.len() != expected_contents.len() {
                        simple_error(&format!("tuple with length {}", contents.len()),
                                     &format!("tuple with length {}", expected_contents.len()));
                        return
                    }
                    for (e, c) in expected_contents.iter().zip(contents) {
                        match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal,
                                                     e, c)
                    }
                }
                _ => simple_error(&format!("`{}`", t),
                                  &format!("tuple")),
            }
        }
    }
}
| 43.109375 | 100 | 0.430364 |
abb2ac82da027996be3987a63e8531ce351e7be4 | 91 | pub mod capture;
// Window-id helpers; its `window_list` function is re-exported below.
mod win_id;
// NOTE(review): presumably local additions/workarounds for the
// `core-foundation-sys` bindings — confirm against the module contents.
mod core_foundation_sys_patches;
pub use win_id::window_list; | 18.2 | 32 | 0.824176 |
0ae3a33a5c5f70722d70449af22d389bd70f3919 | 10,862 | #![deny(warnings)]
extern crate arg_parser;
extern crate extra;
use std::cell::Cell; // Provide mutable fields in immutable structs
use std::env;
use std::error::Error;
use std::fs;
use std::io::{self, BufReader, Read, Stderr, StdoutLock, Write};
use std::process::exit;
use extra::option::OptionalExt;
use arg_parser::ArgParser;
/// Manual/help text printed verbatim for `-h`/`--help`.
const MAN_PAGE: &'static str = /* @MANSTART{cat} */ r#"NAME
    cat - concatenate files and print on the standard output
SYNOPSIS
    cat [-h | --help] [-A | --show-all] [-b | --number-nonblank] [-e] [-E | --show-ends]
        [-n | --number] [-s | --squeeze-blank] [-t] [-T] FILES...
DESCRIPTION
    Concatenates all files to the standard output.
    If no file is given, or if FILE is '-', read from standard input.
OPTIONS
    -A
    --show-all
        equivalent to -vET
    -b
    --number-nonblank
        number nonempty output lines, overriding -n
    -e
        equivalent to -vE
    -E
    --show-ends
        display $ at the end of each line
    -n
    --number
        number all output lines
    -s
    --squeeze-blank
        supress repeated empty output lines
    -t
        equivalent to -vT
    -T
    --show_tabs
        display TAB characters as ^I
    -v
    --show-nonprinting
        use caret (^) and M- notation, except for LFD and TAB.
    -h
    --help
        display this help and exit
AUTHOR
    Written by Michael Murphy.
"#; /* @MANEND */
/// Parsed configuration for one `cat` invocation.
struct Program {
    // Process exit status; `Cell` so read-only methods can flip it to 1.
    exit_status: Cell<i32>,
    // -n: number all output lines.
    number: bool,
    // -b: number only nonempty lines (overrides -n).
    number_nonblank: bool,
    // -E: print `$` before each newline.
    show_ends: bool,
    // -T: print TAB as `^I`.
    show_tabs: bool,
    // -v: caret/M- notation for nonprinting bytes.
    show_nonprinting: bool,
    // -s: collapse runs of blank lines into one.
    squeeze_blank: bool,
    // Input paths; "-" means standard input.
    paths: Vec<String>,
}
impl Program {
    /// Parses command-line arguments into a `Program` configuration.
    ///
    /// Prints the man page and exits immediately for `-h`/`--help`.
    /// Combination flags follow GNU cat: `-A` = `-vET`, `-e` = `-vE`,
    /// `-t` = `-vT`, and `-b` overrides `-n`.
    fn initialize(stdout: &mut StdoutLock, stderr: &mut Stderr) -> Program {
        let mut parser = ArgParser::new(10).
            add_flag(&["A", "show-all"]). //vET
            add_flag(&["b", "number-nonblank"]).
            add_flag(&["e"]). //vE
            add_flag(&["E", "show-ends"]).
            add_flag(&["n", "number"]).
            add_flag(&["s", "squeeze-blank"]).
            add_flag(&["t"]). //vT
            add_flag(&["T", "show-tabs"]).
            add_flag(&["v", "show-nonprinting"]).
            add_flag(&["h", "help"]);
        parser.parse(env::args());
        let mut cat = Program {
            exit_status: Cell::new(0i32),
            number: false,
            number_nonblank: false,
            show_ends: false,
            show_tabs: false,
            show_nonprinting: false,
            squeeze_blank: false,
            paths: Vec::with_capacity(parser.args.len()),
        };
        if parser.found("help") {
            stdout.write(MAN_PAGE.as_bytes()).try(stderr);
            stdout.flush().try(stderr);
            exit(0);
        }
        if parser.found("show-all") {
            cat.show_nonprinting = true;
            cat.show_ends = true;
            cat.show_tabs = true;
        }
        if parser.found("number") {
            cat.number = true;
            cat.number_nonblank = false;
        }
        // -b overrides -n, as in GNU cat.
        if parser.found("number-nonblank") {
            cat.number_nonblank = true;
            cat.number = false;
        }
        // -e implies -E (its -v half is handled below).
        if parser.found("show-ends") || parser.found(&'e') {
            cat.show_ends = true;
        }
        if parser.found("squeeze-blank") {
            cat.squeeze_blank = true;
        }
        // -t implies -T (its -v half is handled below).
        if parser.found("show-tabs") || parser.found(&'t') {
            cat.show_tabs = true;
        }
        // -e and -t both imply -v.
        if parser.found("show-nonprinting") || parser.found(&'e') || parser.found(&'t') {
            cat.show_nonprinting = true;
        }
        if !parser.args.is_empty() {
            cat.paths = parser.args;
        }
        cat
    }
    /// Concatenates every input to stdout and returns the exit status
    /// (0 on success, 1 if any input could not be read).
    ///
    /// `-` (or an empty path list) means standard input. When no formatting
    /// flag is active, inputs are streamed straight through with `io::copy`.
    fn and_execute(&self, stdout: &mut StdoutLock, stderr: &mut Stderr) -> i32 {
        let stdin = io::stdin();
        // 1-based so -n/-b output matches GNU cat; shared across files so
        // numbering continues from one input to the next.
        let line_count = &mut 1usize;
        let flags_enabled = self.number || self.number_nonblank || self.show_ends || self.show_tabs ||
            self.squeeze_blank || self.show_nonprinting;
        if self.paths.is_empty() && flags_enabled {
            self.cat(&mut stdin.lock(), line_count, stdout, stderr);
        } else if self.paths.is_empty() {
            io::copy(&mut stdin.lock(), stdout).try(stderr);
        } else {
            for path in &self.paths {
                if flags_enabled && path == "-" {
                    self.cat(&mut stdin.lock(), line_count, stdout, stderr);
                } else if path == "-" {
                    // Copy the standard input directly to the standard output.
                    io::copy(&mut stdin.lock(), stdout).try(stderr);
                } else if fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false) {
                    stderr.write(path.as_bytes()).try(stderr);
                    stderr.write(b": Is a directory\n").try(stderr);
                    stderr.flush().try(stderr);
                    self.exit_status.set(1i32);
                } else if flags_enabled {
                    // Open the file and filter its contents through `cat`.
                    fs::File::open(&path)
                        .map(|file| self.cat(BufReader::new(file), line_count, stdout, stderr))
                        // If an error occurred, print the error and set the exit status.
                        .unwrap_or_else(|message| {
                            stderr.write(path.as_bytes()).try(stderr);
                            stderr.write(b": ").try(stderr);
                            stderr.write(message.description().as_bytes()).try(stderr);
                            stderr.write(b"\n").try(stderr);
                            stderr.flush().try(stderr);
                            self.exit_status.set(1i32);
                        });
                } else {
                    // Open a file and copy the contents directly to standard output.
                    fs::File::open(&path).map(|ref mut file| { io::copy(file, stdout).try(stderr); })
                        // If an error occurs, print the error and set the exit status.
                        .unwrap_or_else(|message| {
                            stderr.write(path.as_bytes()).try(stderr);
                            stderr.write(b": ").try(stderr);
                            stderr.write(message.description().as_bytes()).try(stderr);
                            stderr.write(b"\n").try(stderr);
                            stderr.flush().try(stderr);
                            self.exit_status.set(1i32);
                        });
                }
            }
        }
        self.exit_status.get()
    }
    /// Copies `file` to stdout, applying the enabled formatting flags
    /// (numbering, `$` line ends, tab/control visualization, squeezing).
    fn cat<F: Read>(&self, file: F, line_count: &mut usize, stdout: &mut StdoutLock, stderr: &mut Stderr) {
        let mut character_count = 0;
        // Whether the current line has produced any output yet. Unlike
        // `character_count` (which only advances while -n/-b are active via
        // `count_character`), this is maintained unconditionally so that
        // --squeeze-blank works even without a numbering flag.
        let mut line_has_content = false;
        let mut last_line_was_blank = false;
        for byte in file.bytes().map(|x| x.unwrap()) {
            // Decide blank-line squeezing *before* any numbering output so a
            // suppressed blank line neither prints nor consumes a number.
            if byte == b'\n' && !line_has_content {
                if self.squeeze_blank && last_line_was_blank {
                    continue
                }
                last_line_was_blank = true;
            }
            // Emit the line number at the start of a line: always for -n,
            // only before nonempty lines for -b.
            if (self.number && character_count == 0) || (character_count == 0 && self.number_nonblank && byte != b'\n') {
                stdout.write(b"    ").try(stderr);
                stdout.write(line_count.to_string().as_bytes()).try(stderr);
                stdout.write(b" ").try(stderr);
                *line_count += 1;
            }
            match byte {
                // Control bytes other than TAB/LF: caret notation with -v,
                // otherwise passed through unchanged (they were previously
                // dropped entirely when -v was off).
                0...8 | 11...31 => {
                    if self.show_nonprinting {
                        push_caret(stdout, stderr, byte+64);
                    } else {
                        stdout.write(&[byte]).try(stderr);
                    }
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                },
                9 => {
                    if self.show_tabs {
                        push_caret(stdout, stderr, b'I');
                    } else {
                        stdout.write(&[byte]).try(stderr);
                    }
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                }
                10 => {
                    if line_has_content {
                        last_line_was_blank = false;
                        character_count = 0;
                        line_has_content = false;
                    }
                    if self.show_ends {
                        stdout.write(b"$\n").try(stderr);
                    } else {
                        stdout.write(b"\n").try(stderr);
                    }
                    stdout.flush().try(stderr);
                },
                32...126 => {
                    stdout.write(&[byte]).try(stderr);
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                },
                // DEL: `^?` with -v, otherwise passed through unchanged.
                127 => {
                    if self.show_nonprinting {
                        push_caret(stdout, stderr, b'?');
                    } else {
                        stdout.write(&[byte]).try(stderr);
                    }
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                },
                // High control range: `M-^X` notation with -v.
                128...159 => if self.show_nonprinting {
                    stdout.write(b"M-^").try(stderr);
                    stdout.write(&[byte-64]).try(stderr);
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                } else {
                    stdout.write(&[byte]).try(stderr);
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                },
                // Remaining high bytes: `M-x` notation with -v.
                _ => if self.show_nonprinting {
                    stdout.write(b"M-").try(stderr);
                    stdout.write(&[byte-128]).try(stderr);
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                } else {
                    stdout.write(&[byte]).try(stderr);
                    count_character(&mut character_count, &self.number, &self.number_nonblank);
                },
            }
            if byte != b'\n' {
                line_has_content = true;
            }
        }
    }
}
/// Increase the character count by one if number printing is enabled.
// NOTE(review): the count only advances while -n/-b are active, so callers
// must not use it to detect blank lines when numbering is off.
fn count_character(character_count: &mut usize, number: &bool, number_nonblank: &bool) {
    if *number || *number_nonblank {
        *character_count += 1;
    }
}
/// Writes `^X` caret notation for a byte to stdout, where `notation` is the
/// character following the caret.
fn push_caret(stdout: &mut StdoutLock, stderr: &mut Stderr, notation: u8) {
    for &b in &[b'^', notation] {
        stdout.write(&[b]).try(stderr);
    }
}
/// Entry point: lock stdout once, build the program from argv, run it, and
/// exit with its status code.
fn main() {
    let stdout_handle = io::stdout();
    let mut out = stdout_handle.lock();
    let mut err = io::stderr();
    let program = Program::initialize(&mut out, &mut err);
    let status = program.and_execute(&mut out, &mut err);
    exit(status);
}
| 36.695946 | 121 | 0.498343 |