c++ – A program that receives 10 numbers from the user and calculates the sum of the even numbers

Hello, I want to use C++ to create a program that receives 10 numbers from the user and gives the sum of the even numbers.

My attempt :

#include<iostream>
using namespace std;

// Reads 10 integers from the user and prints the sum of the even ones.
int main()
{
    int num;        // current number entered by the user
    int sumnum = 0; // running sum of the even numbers

    for (int i = 1; i <= 10; i++) {
        cout << "enter a num" << i << endl;
        cin >> num;

        // The even-check and the accumulation must happen INSIDE the loop;
        // in the original they ran after it, so only the last number entered
        // was ever examined — and `num % 2` (0 or 1) was added instead of num.
        if (num % 2 == 0)
            sumnum += num;
    }

    // Print the total once, after all 10 numbers have been read.
    cout << sumnum << endl;
}

My attempt didn’t work

Please, I want to improve this attempt; I don't want to know any other method.

Any comments, hints, etc. are welcome.

Running Program Again With Loop in Java

Running Program Again With Loop in Java – Code Review Stack Exchange

c++ – Structuring the program that demonstrates collision detection within a field of view

I recently needed to detect agents within a certain field of view and came across this question in gamedev stack exchange. To learn how it works, I followed the first answer’s guidance and decided to make a program that demonstrates how “field-of-view-like collision detection” is done. But throughout the process, I struggled a lot with how to structure the program. Here is the code.

#define OLC_PGE_APPLICATION
#include "olcPixelGameEngine.h"

#define PI 3.14159f
// Parenthesize both the arguments and the whole expansion: an
// unparenthesized function-like macro breaks under operator precedence,
// e.g. `2 * MAX(1, 2)` expanded to `2 * 1 > 2 ? 1 : 2`, which is 2, not 4.
// NOTE: arguments are still evaluated twice — don't pass side effects.
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) > (b) ? (b) : (a))

struct Point
{
    Point()
    {
    }

    Point(olc::vf2d _position, float _directionAngle, float _rotationAngle) :
        position(_position), directionAngle(_directionAngle), rotationAngle(_rotationAngle)
    {
    }
    olc::vf2d position = { 0.0f, 0.0f };
    float directionAngle = 0.0f;
    float rotationAngle = 0.0f;
    bool withinSensoryRange = false;
    olc::Pixel color;
};

// An isosceles triangle used as the agent's body, stored in model space
// (centred on the origin, apex pointing "up" towards negative y).
struct Triangle
{
    Triangle()
    {
    }

    Triangle(olc::vf2d _p1, olc::vf2d _p2, olc::vf2d _p3) :
        p1(_p1), p2(_p2), p3(_p3)
    {
    }

    olc::vf2d p1 = { 0.0f,   -7.0f };
    olc::vf2d p2 = { -5.0f,   5.0f };
    olc::vf2d p3 = { 5.0f,    5.0f };

    // Returns a copy of this triangle rotated by rotationAngle (radians,
    // about the model-space origin) and then translated by offset.
    // The triangle itself is left untouched.
    Triangle TranslateAndRotate(const float rotationAngle, olc::vf2d offset) const
    {
        // Hoist the trig calls: the original evaluated cosf/sinf six
        // times each per call for the same angle.
        const float c = cosf(rotationAngle);
        const float s = sinf(rotationAngle);

        Triangle tri;
        tri.p1 = { c * p1.x - s * p1.y + offset.x, s * p1.x + c * p1.y + offset.y };
        tri.p2 = { c * p2.x - s * p2.y + offset.x, s * p2.x + c * p2.y + offset.y };
        tri.p3 = { c * p3.x - s * p3.y + offset.x, s * p3.x + c * p3.y + offset.y };
        return tri;
    }
};

// Interactive demo of field-of-view collision detection: a triangular agent
// senses "fly" points within a radius and a field of view; debug mode draws
// the sensing geometry and help text.
class PlayGround : public olc::PixelGameEngine
{

public:

    PlayGround()
    {
        sAppName = "PlayGround";
    }

private:

    // When true, draw the help text and the sensory/FOV debug geometry.
    bool debug = true;

private:

    // Agent state.
    Triangle agent1;                          // body shape in model space
    float rotationAngle1 = 0.0f;              // heading, radians
    float sensoryRadius1 = 50.0f;             // detection radius, pixels
    float fov1 = PI;                          // field-of-view half-angle, radians
    float agent1Speed = 120.0f;               // forward speed, pixels/second
    float directionPointDistance1 = 60.0f;    // length of the drawn heading line
    olc::vf2d position1 = { 300.0f, 150.0f }; // position in screen space

private:

    olc::Pixel offWhite = olc::Pixel(200, 200, 200);

private:

    float pointsSpeed = 300.0f;               // speed of the wandering points
    int nPoints = 1000;
    std::vector<std::unique_ptr<Point>> points;

private:

    // Euclidean distance between (x1, y1) and (x2, y2).
    float GetDistance(float x1, float y1, float x2, float y2)
    {
        return sqrtf((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1));
    }

    // The triangle model points "up" (negative y), a quarter turn from the
    // zero angle of cos/sin, so movement and drawing use the rotation angle
    // minus 90 degrees.
    float DirectionAngle(float rotationAngle)
    {
        return rotationAngle - (PI / 2.0f);
    }

private:

    bool OnUserCreate() override
    {
        // Scatter the points with random positions and wobble angles.
        // NOTE(review): rand() is never seeded, so every run produces the
        // same layout; call srand() here if variety is wanted.
        for (int i = 0; i < nPoints; i++)
        {
            //4 random floats between 0 and 1 for initializing x, y and rotation angle and direction angle for point
            float rx = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            float ry = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            float rra = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            float rda = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
            std::unique_ptr<Point> point = std::make_unique<Point>(olc::vf2d(rx * 600, ry * 300), rda * (PI * 2), rra * (PI * 2));
            points.push_back(std::move(point));
        }

        return true;
    }

    bool OnUserUpdate(float elapsedTime) override
    {

        //USER CONTROLS

        if (GetKey(olc::UP).bHeld)
        {
            // Move forward along the current heading.
            position1.x += cosf(DirectionAngle(rotationAngle1)) * elapsedTime * agent1Speed;
            position1.y += sinf(DirectionAngle(rotationAngle1)) * elapsedTime * agent1Speed;
        }
        if (GetKey(olc::RIGHT).bHeld)
            rotationAngle1 += 3.0f * elapsedTime;
        if (GetKey(olc::LEFT).bHeld)
            rotationAngle1 -= 3.0f * elapsedTime;
        if (GetKey(olc::Q).bHeld)
            fov1 -= 3.0f * elapsedTime;
        if (GetKey(olc::W).bHeld)
            fov1 += 3.0f * elapsedTime;
        if (GetKey(olc::A).bHeld)
            sensoryRadius1 -= 50.0f * elapsedTime;
        if (GetKey(olc::S).bHeld)
            sensoryRadius1 += 50.0f * elapsedTime;
        if (GetKey(olc::D).bPressed)
            debug = !debug;

        // Clamp the FOV half-angle to [0, PI] and the radius to [0, 200].
        fov1 = MAX(MIN(fov1, PI), 0);
        sensoryRadius1 = MAX(MIN(sensoryRadius1, 200), 0);

        //TRANSFORMATIONS FOR TRIANGLE

        Triangle transformedAgent1 = agent1.TranslateAndRotate(rotationAngle1, position1);

        // Point ahead of the triangle that visualises the direction vector.
        olc::vf2d direction1;
        direction1.x = (cosf(DirectionAngle(rotationAngle1)) * directionPointDistance1) + position1.x;
        direction1.y = (sinf(DirectionAngle(rotationAngle1)) * directionPointDistance1) + position1.y;

        // The two field-of-view boundary points, one at heading + fov and
        // the other at heading - fov.
        olc::vf2d fovPoints11;
        olc::vf2d fovPoints12;

        // Positions based on the triangle position, fov and the sensory range.
        fovPoints11.x = (cosf(DirectionAngle(rotationAngle1 + fov1)) * sensoryRadius1) + position1.x;
        fovPoints11.y = (sinf(DirectionAngle(rotationAngle1 + fov1)) * sensoryRadius1) + position1.y;

        fovPoints12.x = (cosf(DirectionAngle(rotationAngle1 - fov1)) * sensoryRadius1) + position1.x;
        fovPoints12.y = (sinf(DirectionAngle(rotationAngle1 - fov1)) * sensoryRadius1) + position1.y;

        //COLLISION DETECTION

        // Pass 1: cheap distance test against the sensory radius.
        for (auto& point : points)
        {
            float distance = GetDistance(point->position.x, point->position.y, position1.x, position1.y);
            if (distance < sensoryRadius1)
                point->withinSensoryRange = true;
            else
            {
                point->color = olc::BLACK;
                point->withinSensoryRange = false;
            }
        }

        // Pass 2: a point inside the radius is inside the FOV when the angle
        // between the forward vector and the vector to the point is at most
        // fov1 — i.e. the dot product of the two unit vectors >= cos(fov1).
        for (auto& point : points)
        {
            if (point->withinSensoryRange)
            {
                olc::vf2d normalizedForwardVector = (direction1 - position1).norm();
                olc::vf2d normalizedPointCentreVector = (point->position - position1).norm();

                float dot = normalizedPointCentreVector.dot(normalizedForwardVector);

                if (dot >= cosf(fov1))
                    point->color = debug ? olc::RED : olc::WHITE;   // seen
                else
                    point->color = debug ? olc::GREEN : olc::BLACK; // in range, out of view
            }
        }

        //RENDERING 

        Clear(olc::Pixel(52, 55, 54));

        if (debug)
        {
            // Help text. The stray "n"s in the original literals were
            // newline escapes that had lost their backslashes; restored.
            DrawString(2, 40, "This is a toy program made to demonstrate how collision\ndetection within "
                "a field of view works. Black flies represent the\npoints that are completely out "
                "of range. In debug mode,\nGreen ones represent the ones that are within the sensory\nradius. The "
                "ones in the sensory radius are tested to\nsee if they are in the field of view, and "
                "if they\nare, they appear red.\n\nWhen debug mode is off, white flies\nrepresent the flies that can "
                "be seen", offWhite);

            DrawString(2, 10,
                "Press up, right and left keys for movement.\n"
                "Press w to increase FOV and q to reduce it.\n"
                "Press s to increase sensory range and a to decrease it.", offWhite);
        }

        DrawString(2, 290, "Press d to toggle text and geometric debug data.", olc::Pixel(200, 250, 200));

        //display info 
        std::ostringstream fovValue;
        fovValue << "FOV: " << round(fov1 * 2.0f * (180 / PI)) << " degrees";
        DrawString(440, 280, fovValue.str(), offWhite);

        std::ostringstream sensoryRangeValue;
        sensoryRangeValue << "Sensory Range: " << round(sensoryRadius1);
        DrawString(440, 265, sensoryRangeValue.str(), offWhite);


        //transform (wobble while moving forward) and draw all the points
        for (auto& point : points)
        {
            point->rotationAngle += 0.05f;
            point->directionAngle -= 0.05f;
            point->position.x += cosf(point->directionAngle) * sinf(point->rotationAngle) * elapsedTime * pointsSpeed;
            point->position.y += sinf(point->directionAngle) * sinf(point->rotationAngle) * elapsedTime * pointsSpeed;

            // Keep the angles inside [0, 2*PI] and wrap positions around
            // the 600x300 play field (toroidal world).
            if (point->rotationAngle > PI * 2)
                point->rotationAngle = 0;
            if (point->rotationAngle < 0)
                point->rotationAngle = PI * 2;
            if (point->directionAngle > PI * 2)
                point->directionAngle = 0;
            if (point->directionAngle < 0)
                point->directionAngle = PI * 2;

            if (point->position.x > 600)
                point->position.x = 0;
            if (point->position.x < 0)
                point->position.x = 600;
            if (point->position.y > 300)
                point->position.y = 0;
            if (point->position.y < 0)
                point->position.y = 300;

            Draw((int)point->position.x, (int)point->position.y, point->color);
        }


        if (debug)
        {
            //lines from centre of triangle to fov points
            DrawLine((int)position1.x, (int)position1.y, (int)fovPoints11.x, (int)fovPoints11.y, olc::RED);
            DrawLine((int)position1.x, (int)position1.y, (int)fovPoints12.x, (int)fovPoints12.y, olc::RED);
            //field of view points
            FillCircle((int)fovPoints11.x, (int)fovPoints11.y, 2, olc::RED);
            FillCircle((int)fovPoints12.x, (int)fovPoints12.y, 2, olc::RED);
            // Trace the arc between the two fov points in red, two pixels thick.
            float tempAngle = DirectionAngle(rotationAngle1 + fov1);
            while (tempAngle > DirectionAngle(rotationAngle1 - fov1))
            {
                for (int i = 0; i < 2; i++)
                    Draw((int)(cosf(tempAngle) * sensoryRadius1 - i) + position1.x,
                        (int)(sinf(tempAngle) * sensoryRadius1 - i) + position1.y, olc::RED);
                tempAngle -= 0.01f;
            }
            //draw sensory radius
            DrawCircle((int)position1.x, (int)position1.y, (int)sensoryRadius1, olc::GREEN);
            //the straight line signifying direction
            DrawLine((int)position1.x, (int)position1.y, (int)direction1.x, (int)direction1.y, offWhite);
        }
        //Draw the main triangle body
        FillTriangle(
            (int)transformedAgent1.p1.x, (int)transformedAgent1.p1.y,
            (int)transformedAgent1.p2.x, (int)transformedAgent1.p2.y,
            (int)transformedAgent1.p3.x, (int)transformedAgent1.p3.y,
            offWhite);


        return true;
    }
};

int main()
{
    PlayGround playGround;
    if (playGround.Construct(600, 300, 2, 2))
        playGround.Start();
}

I am sure its bad and tons of optimizations can be made, but for this particular question, I want to focus on how I could have structured it better. Thank you.

It is made using pixel game engine, so if you wish to test it out, it needs this. Its a single file library, so easy to set up.

design – What design pattern / class / interface should I use for encapsulating a program?

I am building a chess-related application, and I want to use a pre-compiled program called Stockfish as my chess engine. I am wondering what is the best practice to encapsulate the usage of the program. There are many design patterns that match – Strategy, as the program is, after all, an algorithm that operates on some context (a.k.a. the game position); Adapter, as what I essentially do is provide an interface for a pre-existing program; Facade, as the engine is, in fact, an entire subsystem of the program; or even just a regular interface that specifies all engine operations like this example, and more. Of course, every solution requires an easy switch to another engine.

What is the best practice in this case?

google sheets – Only the first thread is displayed in my Apps Scripts program, but I need all the threads

My issue is that the first thread in my inbox is being displayed 45 times which is the number of messages in my inbox. What I need is the for loop to go through all the threads and gather all the information in the threads into a 2D array.

I know the issue is most likely with var message = threads[i].getMessages()[0] but I’m not sure how to solve the problem.

This my code:

/**
 * Collects the id, labels and sender of every thread in the inbox into a
 * 2D array and logs it.
 *
 * Fixes relative to the original attempt:
 *  - bracket indexing (threads[i], getMessages()[0]) instead of call
 *    syntax (threads(i) tries to CALL the array and fails);
 *  - a single loop instead of two nested loops sharing the counter `i`,
 *    which captured the first thread's data once and then wrote it into
 *    every row — the reason one thread appeared 45 times;
 *  - the result array is created once, before the loop.
 */
function TheaThreads() {
  var ss = SpreadsheetApp.getActiveSpreadsheet();
  // NOTE(review): the sheet is fetched but never written to, as in the
  // original; use threadSheet.getRange(...).setValues(threadArray) to persist.
  var threadSheet = ss.getSheetByName("Threads");
  var threads = GmailApp.getInboxThreads();
  var threadArray = [];

  for (var i = 0; i < threads.length; i++) {
    var message = threads[i].getMessages()[0]; // first message of this thread
    var label = threads[i].getLabels();
    var ident = message.getId();
    var emailfrom = message.getFrom();

    if (label == null || label == undefined || label.length == 0) {
      label = "No Label";
    }

    threadArray[i] = [ident, label, emailfrom];
    Logger.log(threadArray);
  }
}

Do Program and Shutter priority modes work on a Nikon N90s with a Series E lens?

According to the Nikon Camera and Lens Compatibility Chart at Nikonians.org, no, you will not be able to use Program or Shutter-priority modes on your N90s with AI, AI-S, or E-series lenses.

Quoting a section from the chart:

Nikon Film SLR AI,AI-S,E
N90s/F90x MF1,2

Notes

  • MF Manual Focus
  • 1 Only in A (Aperture Priority) or M (Manual) modes. P (Program) or S (Shutter priority) exposure modes will not function.
  • 2 No 3D Matrix Exposure Metering.

functional programming – C program to print a vertical english ruler whose length is given by the user

In this source code, I have written a program, using for loops, to print an English ruler whose length is given by the user.
What I want to know is whether it is possible to modify this program to print the ruler in a more logical way. And if so, how can it be done without applying recursion?

This is the program to print ruler vertically.

#include<stdio.h>

/* Prints `count` dashes on the current line (no newline). */
static void print_dashes(int count)
{
    int q;
    for (q = 0; q < count; q++)
        printf("-");
}

/*
 * Prints a vertical English ruler.  The user supplies the length of the
 * central (inch) marks and the number of inches.
 *
 * Changes from the original: the dash-printing loop was copy-pasted eight
 * times per inch — it is now a helper driven by a table of tick lengths —
 * and the trailing "n" in every format string was a newline escape that
 * had lost its backslash ("...dashesn" -> "...dashes\n"); restored.
 */
int main()
{
    /* Tick lengths between two inch marks, as offsets subtracted from n:
       eighth, quarter, eighth, half, eighth, quarter, eighth. */
    static const int offsets[] = { 3, 2, 3, 1, 3, 2, 3 };
    int x, t, num, n;

    printf("Enter height of central dashes\n");
    scanf("%d", &n);
    printf("Enter number of inches\n");
    scanf("%d", &num);

    for (x = 0; x < num; x++)
    {
        /* Inch mark: full-length dashes labelled with the inch number. */
        print_dashes(n);
        printf("%d\n", x);

        /* The seven shorter ticks inside this inch. */
        for (t = 0; t < 7; t++)
        {
            print_dashes(n - offsets[t]);
            printf("\n");
        }
    }

    /* Closing inch mark (x == num after the loop). */
    print_dashes(n);
    printf("%d\n", x);

    return 0;
}

This is the output for vertical ruler

DreamProxies - Cheapest USA Elite Private Proxies 100 Private Proxies 200 Private Proxies 400 Private Proxies 1000 Private Proxies 2000 Private Proxies ExtraProxies.com - Buy Cheap Private Proxies Buy 50 Private Proxies Buy 100 Private Proxies Buy 200 Private Proxies Buy 500 Private Proxies Buy 1000 Private Proxies Buy 2000 Private Proxies ProxiesLive Proxies-free.com New Proxy Lists Every Day Proxies123