Performance – Wait-free Triple Buffer Primitive

I'm trying to write a triple buffer for a wait-free update-and-render loop, after being bitten once too often by lock starvation (in another project, using a library).

By way of reference, a triple buffer is effectively a single-producer, single-consumer channel that allows the producer to update the buffer at any rate, while the consumer can always read the most recently completed buffer. Since the renderer only needs the latest state to extrapolate a (near-)future state, and the simulation could be updated several times in between, this fits. Since simulation data tends to update EVERYTHING and also be arbitrary, I wanted the writing and reading to be completely unrestricted with respect to what can be done with the buffers. Since I had to deal with starved rendering threads in the last project, I decided to go lock-free, and ended up with what I consider wait-free (the synchronization is practically a single `fetch_nand` statement).

Here is the code (as with GitHub):

use std::{
    cell::UnsafeCell,
    fmt::Debug,
    sync::{
        atomic::{AtomicBool, AtomicUsize},
        Arc,
    },
};

use cb::utils::CachePadded;

#[allow(unused_imports)]
use log::{debug, error, info, trace, warn};

pub fn buffer<T: Clone>(src: T) -> (ReadingView<T>, EditingView<T>) {
    TripleBuffer::alloc(src)
}

#[derive(Debug)]
struct TripleBufferIndices {
    snatched_read: CachePadded<usize>,        // owned by the reader
    packed_vals: CachePadded<AtomicUsize>,    // shared
    stale: CachePadded<AtomicBool>,           // shared
    edit_rw: CachePadded<(usize, usize)>,     // owned by the editor
}
impl TripleBufferIndices {
#[inline]
    fn pack (v0: usize, v1: usize) -> usize {
(0b0 << 4) + (v0 << 2) + ((!v1) & 0b11)
    }
    #[inline]
    fn unpack(packed: usize) -> (usize, usize) {
        let should_negate = ((packed >> 4) & 0b1) != 0;
        let most_recent = (if should_negate { !packed } else { packed } >> 2) & 0b11;
        let next_write = !packed & 0b11;
        (most_recent, next_write)
    }
    fn snatch(&mut self) {
        let mask = (0b1 << 4)
            + (0b11 << 2)
            + match *self.snatched_read {
                0 => 0b00,
                1 => 0b01,
                2 => 0b10,
                _ => panic!("We messed up!"),
            };
        let old_snatched = *self.snatched_read;
        if !self.stale.swap(true, std::sync::atomic::Ordering::Acquire) {
            *self.snatched_read = Self::unpack(
                self.packed_vals
                    .fetch_nand(mask, std::sync::atomic::Ordering::AcqRel),
            )
            .0;
            trace!(
                "Snatching index {:?} and returning index {:?}.",
                old_snatched,
                *self.snatched_read
            );
        }
    }
    fn advance(&mut self) {
        let next_write = Self::unpack(self.packed_vals.swap(
            Self::pack(self.edit_rw.1, self.edit_rw.1),
            std::sync::atomic::Ordering::AcqRel,
        ))
        .1;
        self.stale.swap(false, std::sync::atomic::Ordering::Release);
        trace!(
            "Advancing index from {:?} to {:?}.",
            self.edit_rw.1,
            next_write
        );
        self.edit_rw.0 = self.edit_rw.1;
        self.edit_rw.1 = next_write;
    }
}
impl Default for TripleBufferIndices {
    fn default() -> Self {
        Self {
            snatched_read: CachePadded::new(0),
            packed_vals: CachePadded::new(AtomicUsize::new(Self::pack(0, 2))),
            stale: CachePadded::new(AtomicBool::new(true)),
            edit_rw: CachePadded::new((1, 2)),
        }
    }
}

struct TripleBuffer<T> {
    ii: UnsafeCell<TripleBufferIndices>,
    backing_mem: *const [UnsafeCell<CachePadded<T>>; 3],
    tt: [*mut T; 3],
}
impl<T: Clone> TripleBuffer<T> {
    pub fn alloc(src: T) -> (ReadingView<T>, EditingView<T>) {
        let backing_mem = Box::into_raw(Box::new([
            UnsafeCell::new(CachePadded::new(src.clone())),
            UnsafeCell::new(CachePadded::new(src.clone())),
            UnsafeCell::new(CachePadded::new(src)),
        ]));
        let mut tt: [*mut T; 3] = unsafe { std::mem::uninitialized() };
        unsafe {
            for i in 0..3 {
                tt[i] = &mut **(*backing_mem)[i].get();
            }
        }
        let arc = Arc::new(Self {
            ii: UnsafeCell::new(TripleBufferIndices::default()),
            backing_mem,
            tt,
        });
        (ReadingView(arc.clone()), EditingView(arc))
    }
    fn snatch(&self) {
        let ii = self.ii.get();
        unsafe { (*ii).snatch() };
    }
    fn advance(&self) {
        let ii = self.ii.get();
        unsafe { (*ii).advance() };
    }
    fn rr(&self) -> *const T {
        let ii = self.ii.get();
        self.tt[unsafe { *(*ii).snatched_read }]
    }
    fn er(&self) -> *const T {
        let ii = self.ii.get();
        self.tt[unsafe { (*ii).edit_rw.0 }]
    }
    fn ew(&self) -> *mut T {
        let ii = self.ii.get();
        self.tt[unsafe { (*ii).edit_rw.1 }]
    }
}
impl<T> Drop for TripleBuffer<T> {
    fn drop(&mut self) {
        unsafe {
            Box::from_raw(self.backing_mem as *mut [UnsafeCell<CachePadded<T>>; 3]);
        };
    }
}

pub struct RWPair<R, W> {
    r: R,
    w: W,
}

pub enum Reading<T> {
    ReadingView(ReadingView<T>),
    Reader(Reader<T>),
}
pub struct ReadingView<T>(Arc<TripleBuffer<T>>);
impl<T> ReadingView<T> {
    pub fn read(self) -> Reader<T> {
        Reader::from_view(self)
    }
}
unsafe impl<T> Send for ReadingView<T> {}
pub struct Reader<T> {
    origin: ReadingView<T>,
    locker: *const T,
}
impl<T> Reader<T> {
    pub fn from_view(rv: ReadingView<T>) -> Reader<T> {
        rv.0.snatch();
        Self {
            locker: rv.0.rr(),
            origin: rv,
        }
    }
    pub fn r<'a>(&'a self) -> &'a T {
        unsafe { &*self.locker }
    }
    pub fn release(self) -> ReadingView<T> {
        self.origin
    }
}

pub enum Editing<T> {
    EditingView(EditingView<T>),
    Editor(Editor<T>),
}
pub struct EditingView<T>(Arc<TripleBuffer<T>>);
impl<T> EditingView<T> {
    pub fn edit(self) -> Editor<T> {
        Editor::from_view(self)
    }
}
unsafe impl<T> Send for EditingView<T> {}
pub struct Editor<T> {
    origin: EditingView<T>,
    rw_lock: RWPair<*const T, *mut T>,
}
impl<T> Editor<T> {
    fn from_view(ev: EditingView<T>) -> Editor<T> {
        Editor {
            rw_lock: RWPair {
                r: ev.0.er(),
                w: ev.0.ew(),
            },
            origin: ev,
        }
    }
    pub fn r<'a>(&'a self) -> &'a T {
        unsafe { &*self.rw_lock.r }
    }
    pub fn w<'a>(&'a self) -> &'a mut T {
        unsafe { &mut *self.rw_lock.w }
    }
    pub fn release(self) -> EditingView<T> {
        self.origin.0.advance();
        self.origin
    }
}

And you can do it this way:

fn sort_of_a_test() {
    let (rv, ev) = tb::buffer([0u8; 1_000]);
    let e_th = std::thread::spawn(move || {
        let mut ev = Some(ev);
        let mut e = None;
        for _ in 0..255 {
            e.replace(ev.take().unwrap().edit());
            // edit something
            let e_int = e.as_ref().unwrap();
            for (rv, ev) in e_int.r().iter().zip(e_int.w().iter_mut()) {
                *ev = *rv + 1;
            }
            ev.replace(e.take().unwrap().release());
        }
    });
    let r_th = std::thread::spawn(move || {
        let mut rv = Some(rv);
        let mut r = None;
        let mut scratch = 0;
        loop {
            r.replace(rv.take().unwrap().read());
            // read a bit
            for v in r.as_ref().unwrap().r().iter() {
                scratch = *v;
            }
            rv.replace(r.take().unwrap().release());
            if scratch == 255 {
                break;
            }
        }
    });
    e_th.join().expect("Failed to join the edit thread.");
    r_th.join().expect("Failed to join the read thread.");
}

EDIT: As a side note I would like to avoid the option swap there as far as possible.

EDIT 2: Forgot to mention that cb is the crossbeam crate.

Architecture – TradOffs between testability and performance for CQRS patterns

My team recently decided to adopt the CQRS pattern, which is similar to that of the famous Microsoft EShopsOnContainers Repository. In this solution, the queries are executed in query classes with Dapper and the commands in command handlers with Dapper EF Core,

The queries I make on my domain are quite complex and require a lot of filtering of the data. Without using LINQ, I have to rely on writing raw dynamic SQL statements (not my strength) to query my domain model, with multiple WHERE clauses etc.

This approach would make it extremely difficult to test my queries for units.

Here are the two approaches that I consider:

Approach 1:

string sql = @"
    SELECT * FROM [dbo].[vw_LogisticsManagement]
    WHERE ([Status] NOT IN @WorkOrderStatuses)
    AND ([PickupDate] IS NOT NULL)
    AND (0 = (DATEDIFF(day, [PickupDate], GETDATE())))
    AND (0 != (DATEDIFF(day, [CheckInDate], GETDATE())))
    ORDER BY [PickupDate]";

// add parameters etc...
var result = await connection.QueryAsync(sql, parameters);

Advantages:

Disadvantage:

  • Hard to test
  • Hard to write (for someone without much SQL experience)

Approach 2

string sql = @"SELECT * FROM [dbo].[vw_LogisticsManagement]";
var result = await connection.QueryAsync(sql, parameters);

var wos = result.Where(wo => wo.PickupDate.HasValue
    && wo.PickupDate.Value.Date == DateTime.Now.Date
    && wo.CheckInDate.Value.Date != DateTime.Now.Date
    && wo.Status != Status.Shipped
    && wo.Status != Status.PartiallyShipped)
    .OrderBy(wo => wo.PickupDate).ToList();

Advantages:

  • Very easy to write (Someone with a lot LINQ Experience)
  • Easy to test filters: I could mock the result that Dapper returns (no need for a database in memory), and then have a predefined list that I could use to apply the filters LINQ and have an expected result

Disadvantage:

  • Not as a performance. Enough to play a role at all?

Basically, so I try to weigh the pros and cons of each component. In my opinion, the testability in this case outweighs the potential increase in performance. However, not all members of my team are likely to agree.

In the end, Approach 1 would be ideal with appropriate tests because many of the questions I'm dealing with are far more complicated.

performance – Python program for "0-1 backpack problem"

With reference to – https://en.wikipedia.org/wiki/Knapsack_problem – this is the definition of a backpack or backpack problem:

knapsack problem

Here is my version of the backpack problem in Python:

def solve_naive(capacity, items, weights, values):

    grid = [[0] * (capacity + 1)]
    for item in range(items):
        grid.append(grid[item].copy())
        for k in range(weights[item], capacity + 1):
            grid[item + 1][k] = max(grid[item][k],
                                    grid[item][k - weights[item]] + values[item])

    solution_value = grid[items][capacity]

    solution_weight = 0
    taken = []
    k = capacity
    for item in range(items, 0, -1):
        if grid[item][k] != grid[item - 1][k]:
            taken.append(item - 1)
            k -= weights[item - 1]
            solution_weight += weights[item - 1]

    return solution_value, solution_weight, taken

I would like to know if I could make this code shorter and more efficient.

NOTE 1 – The data is the Nasdaq 100 list of current prices and price estimates for one hundred shares (from one day in 2019). My investment budget is $ 10,000.

NOTE 2 – For your information, the investment (the weight solution) is 999930 cents (9999.30 USD) and the expected return ( solution value) is 1219475 cents (12,194.75 USD).

NOTE 3 – It will take some time for the Nasdaq 100 backpack to run on my computer.

Any help would be appreciated.

[WTS]█ ★ 80% OFF 4 LIFE✅ ♥ SSD ★ CLOUDLINUX @ 1Core / 1GB RAM ★ LITESPEED + LSCache✅ ♥ FREE WHMCS / Domain ★

SPECIAL HOSTING OFFERS 2019!Up to 80% off all reseller hosting plans VALID ONLY FOR A VERY LIMITED TIME!

SSD CLOUD proudly proclaims thatSPECIAL HOSTING OFFERS 2019!on our wordpress hosting, business hosting, ecommerce hosting, multi location server hosting, liteSpeed ​​reseller hosting, SSD cloud server, self or fully managed cloud VPS and Dedicated Servers.

BLAZING FAST RAID-PROTECTED SSD HARD DRIVES + LiteSpeed ​​+ CloudLinux + SOFTACULOUS PRO + FREE MIGRATION + FREE WHMCS, domain and SSL END USER SUPPORT + FREESEOTOOLS + CLOUDFLARE CDN!,

About SSD CLOUD!
SSD CLOUDSSD CLOUD is a full-service technology solutions provider offering web hosting, reseller hosting, SSD cloud servers, self-managed or fully managed VPS and dedicated servers. We offer affordable, flexible and reliable services! With this name you will grow and learn to trust all your hosting solutions.

Why should you choose us?

We provide affordable, reliable and powerful hosting services to people around the world. We provide high quality services at a very reasonable price that will help your business succeed online. We maintain a stable hosting environment and serve customers with a dedicated team for technical support, sales and invoice support.

HURRY UP!Grab this offer now for your personal or business use before the EXCLUSIVE SALE OFFER expires!

Web hosting features:

• Intel Xeon PowerfulE3 / E5 CPUServer!
• Lightning fastSSDDrives with high IOP!
RAID 10 privacyBest performance
Softaculous ProAuto Installer, Paper Lantern Theme
CloudFlareContent Delivery Network
Cloud LinuxOS +LitespeedWeb Server
Built-inSEO Tools Attracta
Every dayOffsite mirrorfuse

• File Manager with Web Enabled, POP / IMap / SMTP & Webmail
PHP 5with suPHP, MariaDB, CGI / Perl 5, CURL
Ruby on Rails, Python, Pearl, Zend Optimizer
• Systemwidespam protection

high-performance& HighestSecure server!
own brandNameserver (ns1.yourdomainname.com)!
Fully branded cPanelControl Panel (Skin) with your company logo!

Quote:

Use the following coupon codes to get up to 80% recurring discounts on all RESELLER HOSTING PLANS!

Quote:

80% DISCOUNT FOR LIFEto the BIENNIAL BILLING TERM with coupon code "SSDCLOUD80"


Quote:

70% DISCOUNT FOR LIFEon ANNUAL BILLING CONDITIONS with coupon code "SSDCLOUD70"


Quote:

30% DISCOUNT FOR LIFEwith coupon code "SSDCLOUD30"



SPECIAL HOSTING OFFERS 2019!VALID UNTIL 29 MAY 2019 WITH THE COUPON CODES INDICATED ABOVE.
▐▌▐▌HOSTING FOR BEGINNERS
20 GBRAID 10 SSD storage
400 GBmonthly bandwidth
Unlimiteddomains
UnlimitedAddon domains
UnlimitedE-mail accounts
UnlimitedFTP / Databases
FREEWHMCS, Domain & SSL
FREEMigration (cPanel accounts)
FREEEnd User Support (White Label Help Desk Portal)
monthly price:[s]$ 19.9 / m[/s] Now only $ 13.9 / m
Biennial price:[s]240 USD / year[/s] Now only $ 48 / year($ 3.9 / m) | OR | Only $ 168 / year with free WHMCS + domain and SSL for life with coupon code: "SSDCLOUD30"

ORDER NOW –New York, United States |Florida, USA | Los Angeles, United States |London, UK |SG, Asia

▐▌▐▌PREMIUM RESELLER HOSTING
30 GBRAID 10 SSD storage
500 GBmonthly bandwidth
Unlimiteddomains
UnlimitedAddon domains
UnlimitedE-mail accounts
UnlimitedFTP / Databases
FREEWHMCS, Domain * & SSL
FREEMigration (cPanel accounts)
FREEEnd User Support (White Label Help Desk Portal)
monthly price:[s]30 USD / million[/s] Now only $ 20.9 / m
Biennial price:[s]360 USD / year[/s] Now only $ 71.9 / year(5.9 $ / m) | OR | Only $ 252 / year with free WHMCS + domain and SSL for life with coupon code: "SSDCLOUD30"

ORDER NOW –New York, United States |Florida, USA | Los Angeles, United States |London, UK |SG, Asia

▐▌▐▌BUSINESS RESELLER HOSTING
40 GBRAID 10 SSD storage
600 GBmonthly bandwidth
Unlimiteddomains
UnlimitedAddon domains
UnlimitedE-mail accounts
UnlimitedFTP / Databases
FREEWHMCS, Domain & SSL
FREEMigration (cPanel accounts)
FREEEnd User Support (White Label Help Desk Portal)
monthly price:[s]$ 39.9 / m[/s] Now only $ 27.9 / m
Biennial price:[s]480 USD / year[/s] Now only $ 95.9 / year($ 7.9 / m) | OR | Only 336 USD / year with free WHMCS + domain and SSL for life with coupon code: "SSDCLOUD30"

ORDER NOW –New York, United States |Florida, USA | Los Angeles, United States | London, UK | SG, Asia

Frequently asked Questions:
1. Do you have an availability guarantee?

Yes, we guarantee a server availability of 99.9%.

2. Where are your servers?

Our reseller servers are located in Los Angeles, Florida and New York.US| London,United KingdomandSG(Asia).

3. Which payment method do you accept?

We accept payments byPayPal, Bitcoin, Debit / Credit Cards.

4. How long does it take to set up my account?

Your account will be set up immediately after receiving the first payment.

Note: * FREE Domain is valid for the term with an annual invoice term using the voucher code "SSDCLOUD30".
* FREE WHMCS is valid for all payment terms using the voucher code "SSDCLOUD30".

If you have questions, feel free to ask us a question. Visit our website for more informationPREMIUM RESELLER HOSTING,

,

Virtualization – TensorFlow performance in a virtual machine on old servers

I want to register some old servers (eg G5 HP servers) for the distributed TensorFlow calculation. You are running Windows 2012 r2 on bare metal. I'm thinking about using VirtualBox or Hyper-V to create an Ubuntu virtual machine guest on the servers to act as a TensorFlow cluster worker.

Ask:

  1. Has anyone performed performance benchmarking for a similar configuration (a) between different virtualization software and (b) versus bare-metal?

  2. Are there customizations / configurations that should be used to maximize performance in this configuration?

I find that I could not install modern operating systems (such as Ubuntu 16 and 19, and CentOS7) on the bare-metal system because outdated RAID controllers can not be disabled (which is why I'm concerned with virtualization).

C ++ Performance Benchmark – Code Review Stack Exchange

I am fairly new to C ++, so I wrote the following code as a learning exercise for efficiently using different simple data structures and what I should avoid and what I do not:

#include <iostream>
#include <vector>
#include <deque>
#include <unordered_map>
#include <ctime>

void primitive_test() {
    unsigned long l = 20000000;
    std::unordered_map<int, int> map;
    std::vector<int> vec1;
    std::vector<int> vec2;
    std::vector<int> vec3;
    std::deque<int> que;
    int *array = new int[l];
    clock_t begin;

    begin = clock();
    for (int i = 0; i < l; i++) {
        map[i] = i;
    }
    std::cout << "std::unordered_map: " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        vec1.push_back(i);
    }
    std::cout << "std::vector (no reserve): " << clock() - begin << '\n';

    begin = clock();
    vec2.reserve(l);
    for (int i = 0; i < l; i++) {
        vec2.push_back(i);
    }
    std::cout << "std::vector (reserve): " << clock() - begin << '\n';

    begin = clock();
    vec3.resize(l);
    for (int i = 0; i < l; i++) {
        vec3[i] = i;
    }
    std::cout << "std::vector (resize): " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        que.push_back(i);
    }
    std::cout << "std::deque: " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        que.emplace_back(i);
    }
    std::cout << "std::deque (emplace): " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        array[i] = i;
    }
    std::cout << "array: " << clock() - begin << '\n';
}

void object_test() {
    unsigned long l = 20000000;
    std::vector<std::tuple<int>> vec1;
    std::vector<std::tuple<int>> vec2;
    std::vector<std::tuple<int>> vec3;
    std::vector<std::tuple<int>> vec4;
    std::vector<std::tuple<int>> vec5;
    std::vector<std::tuple<int>> vec6;
    std::deque<std::tuple<int>> que;
    auto *array = new std::tuple<int>[l];
    clock_t begin;

    begin = clock();
    for (int i = 0; i < l; i++) {
        std::tuple<int> p(i);
        vec1.push_back(p);
    }
    std::cout << "std::vector (no reserve): " << clock() - begin << '\n';

    begin = clock();
    vec2.reserve(l);
    for (int i = 0; i < l; i++) {
        std::tuple<int> p(i);
        vec2.push_back(p);
    }
    std::cout << "std::vector (reserve): " << clock() - begin << '\n';

    begin = clock();
    vec3.resize(l);
    for (int i = 0; i < l; i++) {
        std::tuple<int> p(i);
        vec3[i] = p;
    }
    std::cout << "std::vector (resize): " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        vec4.emplace_back(i);
    }
    std::cout << "std::vector (emplace, no reserve): " << clock() - begin << '\n';

    begin = clock();
    vec5.reserve(l);
    for (int i = 0; i < l; i++) {
        vec5.emplace_back(i);
    }
    std::cout << "std::vector (emplace, reserve): " << clock() - begin << '\n';

    // takes too long - is there a faster way to emplace at a specific position?
    // begin = clock();
    // vec6.resize(l);
    // for (int i = 0; i < l; i++) {
    //     vec6.emplace(vec6.begin() + i, i);
    // }
    // std::cout << "std::vector (emplace, resize): " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        std::tuple<int> p(i);
        que.push_back(p);
    }
    std::cout << "std::deque: " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        que.emplace_back(i);
    }
    std::cout << "std::deque (emplace): " << clock() - begin << '\n';

    begin = clock();
    for (int i = 0; i < l; i++) {
        std::tuple<int> p(i);
        array[i] = p;
    }
    std::cout << "array: " << clock() - begin << '\n';
}

int main() {
    std::cout << "\nPrimitive test.\n";
    primitive_test();
    std::cout << "\nObject test.\n";
    object_test();
}

The results of running the code on my computer are somewhat surprising to me, which leads me to believe that I may have made a mistake or not treat every case in a fair way (my comments in // below):

Test primitive.
std :: unordered_map: 8231979 // wow!
std :: vector (no reserve): 402252
std :: vector (reserve): 328177 // Why is reserving so much slower than resizing?
std :: vector (resize): 132258
std :: deque: 320600
std :: deque (emplace): 363182 // Storage a bit slower?
array: 64457 // should vector be so much slower?

Test objects.
std :: vector (no reserve): 1801645
std :: vector (reserve): 551038
std :: vector (resize): 1100332 // about twice as long as above?
std :: vector (emplace, no reserve): 1979130 // Injection is slower than reservation?
std :: vector (emplace, reserve): 675151
// Is there a way to place a specific position in a vector if we've already made room for it?
std :: deque: 579424 // faster than array?
std :: deque (emplace): 693743 // Storage is slower?
Array: 620844

Operation terminated with exit code 0

Here are my CMakeLists:

cmake_minimum_required (VERSION 3.12)
Project (Maptest)

set (CMAKE_CXX_STANDARD 11)
set (CMAKE_CXX_FLAGS_RELEASE "-O2")

add_executable (maptest main.cpp)

I run this code on a Predator Helios 300 laptop.

Can I improve this benchmark in any way? Did I do something wrong / inefficient or did I use a feature I should not have used? Is it just that the methodology / approach I use for testing is flawed?

[WTS] Balanced DEDICATED SERVERS + 35% discount for 3 months | Great uptime + Free setup!

Are you interested in Hosting services for dedicated high-end servers?

Cento host was one of the leading web hosting service providers since 2002, We use the latest Dell server technology in our own data center to ensure that our customers get the best performance from their websites. Our wide range of web hosting services will meet your hosting needs, whether you are building a small, simple website or a high-performance, high-traffic website. Our main goal is to provide the best available technology with exceptional customer support. With a large customer base, there are many requirements for web hosting. We have tailored our business to these needs by offering flexible and scalable services.

SPECIAL OFFER: First 3 MONTHS – 35% DISCOUNT

Our dedicated servers are located in own data center in Europe – Bosnia and Herzegovina, You get serial console access with performance servers. You can also install and customize applications.

Our features:

  • Unmeasured bandwidth
  • Free DDoS protection
  • IPv6 ready
  • Free setup
  • Free hardware RAID
  • Fast setup
  • Dell PowerEdge server
  • DDR3 ECC RAM
  • Guaranteed resource
  • server monitoring
  • Remote reboot
  • Single and double CPUs

Take a look at Centohost Dedicated server for beginners:
Start with a dedicated server in your budget – (Unmeasured Bandwidth + Free DDoS Protection + IPv6 Capable + Free Setup)

PE-R320 server package
Six Core Xeon E5-2420v2
Speed ​​- 6 cores x 1.9 GHz
Memory – 16GB DDR3
Hard disk – 2 TB (2 x 1 TB SATA3)
Bandwidth – unmeasured
PRICE – 129 USD / month – 83.85 USD / month – ORDER NOW

PE-R320 Server Package – 2
Six Core Xeon E5-2440
Speed ​​- 6 cores x 2.4 GHz
Memory – 16GB DDR3
Hard disk – 2 TB (2 x 1 TB SATA3)
Bandwidth – unmeasured
PRICE – $ 144 – $ 93.60 / month (3 months 35% off) – ORDER NOW

You can also try Centohost Performance-specific servers:
More powerful servers for medium and large projects – (Unmeasured Bandwidth + Free DDoS Protection + IPv6 Capable + Free Setup)

PE R610 Server Package – 1
2 x 6 Core Xeon X5650
Speed ​​- 12 cores x 2.7 GHz (+ HT)
Memory – 32 GB DDR3
Hard disk – 2 TB SAS (2 x 1 TB SATA) Optional
Bandwidth – unmeasured
PRICE – $ 243 – $ 157.95 / month (3 months 35% off) – ORDER NOW

PE R610 server package – 2
2 x 6 Core Xeon X5670
Speed ​​- 12 cores x 2.93 GHz (+ HT)
Memory – 32 GB DDR3
Hard disk – 2 TB SAS (2 x 1 TB SATA) Optional
Bandwidth – unmeasured
PRICE – $ 259 – $ 168.35 / month (3 months 35% off) – ORDER NOW

PE-R710 server package – 3
2 x 6 Core Xeon X5670
Speed ​​- 12 cores x 2.93 GHz (+ HT)
Memory – 32 GB DDR3
Hard disk – 2 TB SAS (2 x 1 TB SATA) Optional
Bandwidth – unmeasured
PRICE – 274 USD – 178.10 USD / month (3 months 35% discount) – ORDER NOW

PE-R620 server package
2 x 8 Core Xeon E5-2670
Speed ​​- 16 cores x 2.60 GHz (+ HT)
Memory – 64 GB DDR3
Hard disk – 2 TB SAS (2 x 1 TB SATA) Optional
Bandwidth – unmeasured
PRICE – $ 369 – $ 239.85 / month (3 months 35% off) – ORDER NOW

Included in the dedicated server:
Fast setup (In most cases our servers will be ready for use within 4 hours of your order.)
IPv4 and IPv6 network (All dedicated servers contain both IPv4 and IPv6 network connections and addresses.)
Undisturbed traffic (All our dedicated servers are shipped with unmetered data transfer.)
No set-up price (All our dedicated servers are FREE, even if you pay monthly.)
Free DDoS protection (All dedicated servers in our offering are protected with an advanced DDoS protection system.)
Free hardware RAID (We offer FREE Hardware Raid Controler for all our Performance Servers.)

=== >> Write us:
Centohost LLC
7260 W. Azure Dr Ste 140-796
Las Vegas, NV 89130, United States
E-mail: info@centohost.com
phone: 1-702-323-3802
fax: 1-702-323-3844

You are not sure which plan is best for you?
No problem. Send us a ticket with your questions and we will contact you with an offer. Free.

,

Performance – Should we improve the time to the first byte or the Page Speed ​​Insights score for SEO?

There are only two page load speed metrics that are really important:

  • Time until delivery of the HTML page (without elements like images, CSS and JS)
  • Time until the first content screen is loaded and usable

The HTML delivery time is important because Googlebot recognizes it most directly. This controls how quickly Googlebot can crawl your website. Algorithmic ranking penalties that Google applies are almost all based on this metric.

The time until the page is usable is important to the users. Users turn away from a site that is not quick to use. This has indirect SEO consequences, as Google determines that users are not satisfied with a website.

How fast does your website have to be?

  • Google will penalize sites where the HTML page is not served within 7 seconds.
  • Users are turning away from a website that can not be used within 3 seconds.

So focus on the users. They are much more picky than Google. You have three seconds. This is divided into:

  • 1 second to get your HTML page.
  • 1 second to download important CSS, JS and image assets.
  • 1 second for the browser to render the page.

Since TTFB is a component of HTML page submission, you must optimize it so that it represents part of the time it takes to submit HTML pages. Down to 200 to 500 ms.

Any assets that are not needed for the page to work should be delayed loading. Most JavaScript should be loaded asynchronously. For pictures below the fold, the load should be moved.

In this sense, the PageSpeed ​​Insights score can be completely ignored. I do not believe that Google uses this score at all directly in rankings. The tool and the score can be useful. It can tell you which optimizations might be available to you. This may help you to prioritize the optimizations to be performed. However, it is a mistake to pay attention only to the score. Instead, focus on making the site fast for users.

Also note that items that you can not influence affect your PageSpeed ​​Insights score. I have a website that completely loads its pages in 1.2 seconds and gets a 100 for their score. However, when I enable AdSense on the page, PageSpeed ​​Insights reports that the full load lasts 10 seconds and the score drops to 63 seconds. This is despite the fact that the base page is fully usable after 1.2 seconds and the ads are then loaded only delayed.

Performance – My Swift / C code with thread runs slower than the non-threaded version

I'm trying to improve the performance of my cellular automates laboratory. I have two arrays of Double representing the current values ​​and the next values.

When I do the calculation in a single thread, I get about 28 steps per second. However, when I split the work into 2, 3, or 4 pieces and put them in a queue for parallel requests, I still get about 28 steps per second. If I continue to increase the chunks, it will take longer and longer for the algorithm to complete. For example, 10 chunks reduce power to about 10 steps per second.

I test this on a 3rd generation iPad Pro with 4 power cores and 4 efficiency cores.

func step(from: Int, to: Int) {
    for j in from ..< to {
        for i in 0 ..< w {
            AEMemoryClear(memory)

            AEMemorySetValue(memory, sI, cells[i   + (j  )*w])
            AEMemorySetValue(memory, aI, i != 0 && j != 0 ? cells[i-1 + (j-1)*w] : 0)
            AEMemorySetValue(memory, bI, j != 0 ? cells[i   + (j-1)*w] : 0)
            AEMemorySetValue(memory, cI, i != wB && j != 0 ? cells[i+1 + (j-1)*w] : 0)
            AEMemorySetValue(memory, dI, i != wB ? cells[i+1 + (j  )*w] : 0)
            AEMemorySetValue(memory, eI, i != wB && j != hB ? cells[i+1 + (j+1)*w] : 0)
            AEMemorySetValue(memory, fI, j != hB ? cells[i   + (j+1)*w] : 0)
            AEMemorySetValue(memory, gI, i != 0 && j != hB ? cells[i-1 + (j+1)*w] : 0)
            AEMemorySetValue(memory, hI, i != 0 ? cells[i-1 + (j  )*w] : 0)

            AERecipeExecute(recipe, memory)

            next[i + j*w] = memory.pointee.slots[index].obj.a.x
        }
    }
}
func step() {

    let start = DispatchTime.now()

    let n: Int = 4
    let z: Int = h / n

    let group = DispatchGroup()
    for i in 0 ..< n {
        group.enter()
        DispatchQueue.global(qos: .userInteractive).async { [unowned self] in
            self.step(from: i * z, to: i == n-1 ? self.h : (i + 1) * z)
            group.leave()
        }
    }

    group.notify(queue: .main) { [unowned self] in
        (self.cells, self.next) = (self.next, self.cells)

        let end = DispatchTime.now()
        let delta = Double(end.uptimeNanoseconds - start.uptimeNanoseconds) / 1000000000
        let target: Double = 1.0 / 60
        print("Calculation time: \(delta) or \(round(1 / delta)) fps, which is \(round(delta / target * 100 * 10) / 10)% of target; number of cells: \(self.w)^2 = \(self.w * self.h); seconds per cell: \(delta / Double(self.w * self.w))")
    }

    group.wait()
}

I also notice another crazy thing: if I do the calculation once a second, the process takes more than twice as long as I do it several times a second. I can only imagine that in this case the efficiency core is used instead of the power core.

Note: AEMemorySetValue, AERecipeExecute, AEMemoryClear are c functions.