use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::{mem, net, time};

use futures::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot;
use futures::{future, Async, Future, Poll, Stream};
use tokio_current_thread::spawn;
use tokio_timer::{sleep, Delay};

use actix::msgs::StopArbiter;
use actix::{Arbiter, Message};

use super::accept::AcceptNotify;
use super::services::{BoxedServerService, InternalServiceFactory, ServerMessage};
use super::Token;
use counter::Counter;

pub(crate) struct WorkerCommand(Conn);

/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections are still alive.
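///
/// The command is produced by `WorkerClient::stop()` below, which keeps the
/// receiving half of `result` so the caller can await the shutdown outcome.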
pub(crate) struct StopCommand {
    graceful: bool,
    result: oneshot::Sender<bool>,
}

#[derive(Debug, Message)]
pub(crate) struct Conn {
    pub io: net::TcpStream,
    pub handler: Token,
    pub token: Token,
    pub peer: Option<net::SocketAddr>,
}

// Note: this must be a `static`, not a `const`. With `const`, every use of
// `MAX_CONNS` would operate on a fresh temporary copy, so the `store` in
// `max_concurrent_connections` would be silently lost.
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);

/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default the limit is set to 25,600 connections per worker.
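///
/// # Examples
///
/// A minimal sketch (not compiled here, since the public re-export path of
/// this function depends on the crate's top-level API):
///
/// ```ignore
/// // Raise the per-worker limit before the server is started; each worker
/// // thread reads the value once, when its connection counter is created.
/// max_concurrent_connections(100_000);
/// ```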
pub fn max_concurrent_connections(num: usize) {
    MAX_CONNS.store(num, Ordering::Relaxed);
}
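
/// Returns the number of connections currently alive on this worker thread,
/// read from the thread-local connection counter.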
pub(crate) fn num_connections() -> usize {
    MAX_CONNS_COUNTER.with(|conns| conns.total())
}

thread_local! {
    static MAX_CONNS_COUNTER: Counter =
        Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
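
/// Handle to a worker held by the accept loop: accepted connections are sent
/// over `tx1`, stop commands over `tx2`, and `avail` mirrors whether the
/// worker can currently take more work.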
#[derive(Clone)]
pub(crate) struct WorkerClient {
    pub idx: usize,
    tx1: UnboundedSender<WorkerCommand>,
    tx2: UnboundedSender<StopCommand>,
    avail: WorkerAvailability,
}

impl WorkerClient {
    pub fn new(
        idx: usize,
        tx1: UnboundedSender<WorkerCommand>,
        tx2: UnboundedSender<StopCommand>,
        avail: WorkerAvailability,
    ) -> Self {
        WorkerClient {
            idx,
            tx1,
            tx2,
            avail,
        }
    }

    /// Sends a connection to the worker. If the worker's channel is closed,
    /// the connection is handed back in the `Err` variant.
    pub fn send(&self, msg: Conn) -> Result<(), Conn> {
        self.tx1
            .unbounded_send(WorkerCommand(msg))
            .map_err(|msg| msg.into_inner().0)
    }

    pub fn available(&self) -> bool {
        self.avail.available()
    }

    pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
        let (result, rx) = oneshot::channel();
        let _ = self.tx2.unbounded_send(StopCommand { graceful, result });
        rx
    }
}
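
/// Shared readiness flag for a worker. The worker flips the flag, and the
/// accept loop reads it; a `false` to `true` transition additionally wakes
/// the accept loop through `AcceptNotify`.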
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
    notify: AcceptNotify,
    available: Arc<AtomicBool>,
}

impl WorkerAvailability {
    pub fn new(notify: AcceptNotify) -> Self {
        WorkerAvailability {
            notify,
            available: Arc::new(AtomicBool::new(false)),
        }
    }

    pub fn available(&self) -> bool {
        self.available.load(Ordering::Acquire)
    }

    pub fn set(&self, val: bool) {
        let old = self.available.swap(val, Ordering::Release);
        // Wake the accept loop only on a `false` -> `true` transition.
        if !old && val {
            self.notify.notify()
        }
    }
}

/// Service worker.
///
/// A worker accepts incoming socket connections (`Conn`) over an unbounded
/// channel and starts stream processing for them.
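///
/// A start-up sketch (hedged, not compiled: `factories` and `avail` are built
/// by the server machinery elsewhere in this crate):
///
/// ```ignore
/// let (tx1, rx1) = futures::sync::mpsc::unbounded();
/// let (tx2, rx2) = futures::sync::mpsc::unbounded();
/// Worker::start(rx1, rx2, factories, avail, time::Duration::from_secs(30));
/// ```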
pub(crate) struct Worker {
    rx: UnboundedReceiver<WorkerCommand>,
    rx2: UnboundedReceiver<StopCommand>,
    services: Vec<BoxedServerService>,
    availability: WorkerAvailability,
    conns: Counter,
    factories: Vec<Box<InternalServiceFactory>>,
    state: WorkerState,
    shutdown_timeout: time::Duration,
}

impl Worker {
    pub(crate) fn start(
        rx: UnboundedReceiver<WorkerCommand>,
        rx2: UnboundedReceiver<StopCommand>,
        factories: Vec<Box<InternalServiceFactory>>,
        availability: WorkerAvailability,
        shutdown_timeout: time::Duration,
    ) {
        availability.set(false);
        let mut wrk = MAX_CONNS_COUNTER.with(|conns| Worker {
            rx,
            rx2,
            availability,
            factories,
            shutdown_timeout,
            services: Vec::new(),
            conns: conns.clone(),
            state: WorkerState::Unavailable(Vec::new()),
        });

        // Create all services up front; if any factory fails, the worker is
        // useless, so stop the whole arbiter.
        let mut fut = Vec::new();
        for factory in &wrk.factories {
            fut.push(factory.create());
        }
        spawn(
            future::join_all(fut)
                .map_err(|e| {
                    error!("Cannot start worker: {:?}", e);
                    Arbiter::current().do_send(StopArbiter(0));
                }).and_then(move |services| {
                    wrk.services.extend(services);
                    wrk
                }),
        );
    }

    fn shutdown(&mut self, force: bool) {
        if force {
            self.services.iter_mut().for_each(|h| {
                let _ = h.call((None, ServerMessage::ForceShutdown));
            });
        } else {
            let timeout = self.shutdown_timeout;
            self.services.iter_mut().for_each(move |h| {
                let _ = h.call((None, ServerMessage::Shutdown(timeout)));
            });
        }
    }

    fn check_readiness(&mut self, trace: bool) -> Result<bool, usize> {
        let mut ready = self.conns.available();
        let mut failed = None;
        for (idx, service) in self.services.iter_mut().enumerate() {
            match service.poll_ready() {
                Ok(Async::Ready(_)) => {
                    if trace {
                        trace!("Service {:?} is available", self.factories[idx].name());
                    }
                }
                Ok(Async::NotReady) => ready = false,
                Err(_) => {
                    error!(
                        "Service {:?} readiness check returned error, restarting",
                        self.factories[idx].name()
                    );
                    failed = Some(idx);
                }
            }
        }
        if let Some(idx) = failed {
            Err(idx)
        } else {
            Ok(ready)
        }
    }
}
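
/// State machine for a worker: `Available` dispatches connections as they
/// arrive, `Unavailable` queues them until every service is ready again,
/// `Restarting` re-creates a failed service, and `Shutdown` drains the
/// remaining connections. `None` is a transient placeholder used by `poll`.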
enum WorkerState {
    None,
    Available,
    Unavailable(Vec<Conn>),
    Restarting(usize, Box<Future<Item = BoxedServerService, Error = ()>>),
    Shutdown(Delay, Delay, oneshot::Sender<bool>),
}

impl Future for Worker {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        // `StopWorker` message handler
        match self.rx2.poll() {
            Ok(Async::Ready(Some(StopCommand { graceful, result }))) => {
                self.availability.set(false);
                let num = num_connections();
                if num == 0 {
                    info!("Shutting down worker, 0 connections");
                    let _ = result.send(true);
                    return Ok(Async::Ready(()));
                } else if graceful {
                    self.shutdown(false);
                    let num = num_connections();
                    if num != 0 {
                        info!("Graceful worker shutdown, {} connections", num);
                        self.state = WorkerState::Shutdown(
                            sleep(time::Duration::from_secs(1)),
                            sleep(self.shutdown_timeout),
                            result,
                        );
                    } else {
                        let _ = result.send(true);
                        return Ok(Async::Ready(()));
                    }
                } else {
                    info!("Force shutdown worker, {} connections", num);
                    self.shutdown(true);
                    let _ = result.send(false);
                    return Ok(Async::Ready(()));
                }
            }
            _ => (),
        }

        // Move the current state out of `self` so we can match on it by value
        // while still calling `&mut self` methods inside the arms.
        let state = mem::replace(&mut self.state, WorkerState::None);

        match state {
            WorkerState::Unavailable(mut conns) => {
                match self.check_readiness(true) {
                    Ok(true) => {
                        self.state = WorkerState::Available;

                        // process requests from wait queue
                        while let Some(msg) = conns.pop() {
                            match self.check_readiness(false) {
                                Ok(true) => {
                                    let guard = self.conns.get();
                                    let _ = self.services[msg.handler.0]
                                        .call((Some(guard), ServerMessage::Connect(msg.io)));
                                }
                                Ok(false) => {
                                    trace!("Worker is unavailable");
                                    self.state = WorkerState::Unavailable(conns);
                                    return self.poll();
                                }
                                Err(idx) => {
                                    trace!(
                                        "Service {:?} failed, restarting",
                                        self.factories[idx].name()
                                    );
                                    self.state = WorkerState::Restarting(
                                        idx,
                                        self.factories[idx].create(),
                                    );
                                    return self.poll();
                                }
                            }
                        }
                        self.availability.set(true);
                        return self.poll();
                    }
                    Ok(false) => {
                        self.state = WorkerState::Unavailable(conns);
                        return Ok(Async::NotReady);
                    }
                    Err(idx) => {
                        trace!(
                            "Service {:?} failed, restarting",
                            self.factories[idx].name()
                        );
                        self.state = WorkerState::Restarting(idx, self.factories[idx].create());
                        return self.poll();
                    }
                }
            }
            WorkerState::Restarting(idx, mut fut) => {
                match fut.poll() {
                    Ok(Async::Ready(service)) => {
                        trace!(
                            "Service {:?} has been restarted",
                            self.factories[idx].name()
                        );
                        self.services[idx] = service;
                        self.state = WorkerState::Unavailable(Vec::new());
                    }
                    Ok(Async::NotReady) => {
                        self.state = WorkerState::Restarting(idx, fut);
                        return Ok(Async::NotReady);
                    }
                    Err(_) => {
                        panic!("Cannot restart {:?} service", self.factories[idx].name());
                    }
                }
                return self.poll();
            }
            WorkerState::Shutdown(mut t1, mut t2, tx) => {
                let num = num_connections();
                if num == 0 {
                    let _ = tx.send(true);
                    Arbiter::current().do_send(StopArbiter(0));
                    return Ok(Async::Ready(()));
                }

                // check graceful timeout
                match t2.poll().unwrap() {
                    Async::NotReady => (),
                    Async::Ready(_) => {
                        self.shutdown(true);
                        let _ = tx.send(false);
                        Arbiter::current().do_send(StopArbiter(0));
                        return Ok(Async::Ready(()));
                    }
                }

                // sleep for 1 second and then check again
                match t1.poll().unwrap() {
                    Async::NotReady => (),
                    Async::Ready(_) => {
                        t1 = sleep(time::Duration::from_secs(1));
                        let _ = t1.poll();
                    }
                }
                self.state = WorkerState::Shutdown(t1, t2, tx);
                return Ok(Async::NotReady);
            }
            WorkerState::Available => {
                loop {
                    match self.rx.poll() {
                        // handle incoming TCP stream
                        Ok(Async::Ready(Some(WorkerCommand(msg)))) => {
                            match self.check_readiness(false) {
                                Ok(true) => {
                                    let guard = self.conns.get();
                                    let _ = self.services[msg.handler.0]
                                        .call((Some(guard), ServerMessage::Connect(msg.io)));
                                    continue;
                                }
                                Ok(false) => {
                                    trace!("Worker is unavailable");
                                    self.availability.set(false);
                                    self.state = WorkerState::Unavailable(vec![msg]);
                                }
                                Err(idx) => {
                                    trace!(
                                        "Service {:?} failed, restarting",
                                        self.factories[idx].name()
                                    );
                                    self.availability.set(false);
                                    self.state = WorkerState::Restarting(
                                        idx,
                                        self.factories[idx].create(),
                                    );
                                }
                            }
                            return self.poll();
                        }
                        Ok(Async::NotReady) => {
                            self.state = WorkerState::Available;
                            return Ok(Async::NotReady);
                        }
                        Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
                    }
                }
            }
            // `None` only exists transiently while `poll` owns the real state.
            WorkerState::None => panic!(),
        };
    }
}
|