+/*
+Minetest-c55
+Copyright (C) 2010-2011 celeron55, Perttu Ahola <celeron55@gmail.com>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
/*
(c) 2010 Perttu Ahola <celeron55@gmail.com>
*/
#include "environment.h"
#include "common_irrlicht.h"
#include <string>
+#include "utility.h"
+#include "porting.h"
+#include "map.h"
+#include "inventory.h"
-#ifdef _WIN32
- #include <windows.h>
- #define sleep_ms(x) Sleep(x)
-#else
- #include <unistd.h>
- #define sleep_ms(x) usleep(x*1000)
-#endif
+/*
+ Some random functions
+*/
+v3f findSpawnPos(ServerMap &map);
+/*
+ A structure containing the data needed for queueing the fetching
+ of blocks.
+*/
struct QueuedBlockEmerge
{
v3s16 pos;
*/
void addBlock(u16 peer_id, v3s16 pos, u8 flags)
{
+ DSTACK(__FUNCTION_NAME);
+
JMutexAutoLock lock(m_mutex);
if(peer_id != 0)
JMutexAutoLock lock(m_mutex);
return m_queue.size();
}
-
-private:
- core::list<QueuedBlockEmerge*> m_queue;
- JMutex m_mutex;
-};
-
-class SimpleThread : public JThread
-{
- bool run;
- JMutex run_mutex;
-
-public:
-
- SimpleThread():
- JThread(),
- run(true)
+
+ u32 peerItemCount(u16 peer_id)
{
- run_mutex.Init();
- }
+ JMutexAutoLock lock(m_mutex);
- virtual ~SimpleThread()
- {}
+ u32 count = 0;
- virtual void * Thread() = 0;
+ core::list<QueuedBlockEmerge*>::Iterator i;
+ for(i=m_queue.begin(); i!=m_queue.end(); i++)
+ {
+ QueuedBlockEmerge *q = *i;
+ if(q->peer_ids.find(peer_id) != NULL)
+ count++;
+ }
- bool getRun()
- {
- JMutexAutoLock lock(run_mutex);
- return run;
- }
- void setRun(bool a_run)
- {
- JMutexAutoLock lock(run_mutex);
- run = a_run;
+ return count;
}
- void stop()
- {
- setRun(false);
- while(IsRunning())
- sleep_ms(100);
- }
+private:
+ core::list<QueuedBlockEmerge*> m_queue;
+ JMutex m_mutex;
};
class Server;
u8 pending_serialization_version;
RemoteClient():
- m_time_from_building(0.0),
- m_num_blocks_in_emerge_queue(0)
+ m_time_from_building(9999),
+ m_excess_gotblocks(0)
{
peer_id = 0;
serialization_version = SER_FMT_VER_INVALID;
pending_serialization_version = SER_FMT_VER_INVALID;
m_nearest_unsent_d = 0;
-
- m_blocks_sent_mutex.Init();
- m_blocks_sending_mutex.Init();
+ m_nearest_unsent_reset_timer = 0.0;
+ m_nothing_to_send_counter = 0;
+ m_nothing_to_send_pause_timer = 0;
}
~RemoteClient()
{
void GetNextBlocks(Server *server, float dtime,
core::array<PrioritySortedBlockTransfer> &dest);
- // Connection and environment should be locked when this is called
- // steps() objects of blocks not found in active_blocks, then
- // adds those blocks to active_blocks
+ /*
+ Connection and environment should be locked when this is called.
+ steps() objects of blocks not found in active_blocks, then
+ adds those blocks to active_blocks
+ */
void SendObjectData(
Server *server,
float dtime,
void SetBlockNotSent(v3s16 p);
void SetBlocksNotSent(core::map<v3s16, MapBlock*> &blocks);
- void BlockEmerged();
-
- /*bool IsSendingBlock(v3s16 p)
- {
- JMutexAutoLock lock(m_blocks_sending_mutex);
- return (m_blocks_sending.find(p) != NULL);
- }*/
-
s32 SendingCount()
{
- JMutexAutoLock lock(m_blocks_sending_mutex);
return m_blocks_sending.size();
}
void PrintInfo(std::ostream &o)
{
- JMutexAutoLock l2(m_blocks_sent_mutex);
- JMutexAutoLock l3(m_blocks_sending_mutex);
o<<"RemoteClient "<<peer_id<<": "
- <<"m_num_blocks_in_emerge_queue="
- <<m_num_blocks_in_emerge_queue.get()
- <<", m_blocks_sent.size()="<<m_blocks_sent.size()
+ <<"m_blocks_sent.size()="<<m_blocks_sent.size()
<<", m_blocks_sending.size()="<<m_blocks_sending.size()
<<", m_nearest_unsent_d="<<m_nearest_unsent_d
+ <<", m_excess_gotblocks="<<m_excess_gotblocks
<<std::endl;
+ m_excess_gotblocks = 0;
}
// Time from last placing or removing blocks
- MutexedVariable<float> m_time_from_building;
+ float m_time_from_building;
+
+ /*JMutex m_dig_mutex;
+ float m_dig_time_remaining;
+ // -1 = not digging
+ s16 m_dig_tool_item;
+ v3s16 m_dig_position;*/
-private:
/*
- All members that are accessed by many threads should
- obviously be behind a mutex. The threads include:
- - main thread (calls step())
- - server thread (calls AsyncRunStep() and Receive())
- - emerge thread
+ List of active objects that the client knows of.
+ Value is dummy.
*/
-
- //TODO: core::map<v3s16, MapBlock*> m_active_blocks
-
- // Number of blocks in the emerge queue that have this client as
- // a receiver. Used for throttling network usage.
- MutexedVariable<s16> m_num_blocks_in_emerge_queue;
+ core::map<u16, bool> m_known_objects;
+private:
/*
Blocks that have been sent to client.
- These don't have to be sent again.
core::map<v3s16, bool> m_blocks_sent;
s16 m_nearest_unsent_d;
v3s16 m_last_center;
- JMutex m_blocks_sent_mutex;
+ float m_nearest_unsent_reset_timer;
+
/*
Blocks that are currently on the line.
This is used for throttling the sending of blocks.
Value is time from sending. (not used at the moment)
*/
core::map<v3s16, float> m_blocks_sending;
- JMutex m_blocks_sending_mutex;
-};
-/*struct ServerSettings
-{
- ServerSettings()
- {
- creative_mode = false;
- }
- bool creative_mode;
-};*/
+ /*
+ Count of excess GotBlocks().
+ There is an excess amount because the client sometimes
+ gets a block so late that the server sends it again,
+ and the client then sends two GOTBLOCKs.
+	This is reset by PrintInfo()
+ */
+ u32 m_excess_gotblocks;
+
+ // CPU usage optimization
+ u32 m_nothing_to_send_counter;
+ float m_nothing_to_send_pause_timer;
+};
-class Server : public con::PeerHandler
+class Server : public con::PeerHandler, public MapEventReceiver,
+ public InventoryManager
{
public:
/*
NOTE: Every public method should be thread-safe
*/
+
Server(
- std::string mapsavedir,
- bool creative_mode,
- HMParams hm_params,
- MapParams map_params,
- float objectdata_inverval,
- u16 active_object_range
- );
+ std::string mapsavedir
+ );
~Server();
void start(unsigned short port);
void stop();
+ // This is mainly a way to pass the time to the server.
+	// Actual processing is done in another thread.
void step(float dtime);
+ // This is run by ServerThread and does the actual processing
void AsyncRunStep();
void Receive();
void ProcessData(u8 *data, u32 datasize, u16 peer_id);
- /*void Send(u16 peer_id, u16 channelnum,
- SharedBuffer<u8> data, bool reliable);*/
+ core::list<PlayerInfo> getPlayerInfo();
- // Environment and Connection must be locked when called
- void SendBlockNoLock(u16 peer_id, MapBlock *block, u8 ver);
- //void SendBlock(u16 peer_id, MapBlock *block, u8 ver);
- //TODO: Sending of many blocks in a single packet
-
- // Environment and Connection must be locked when called
- //void SendSectorMeta(u16 peer_id, core::list<v2s16> ps, u8 ver);
+ u32 getDayNightRatio()
+ {
+ return time_to_daynight_ratio(m_time_of_day.get());
+ }
- core::list<PlayerInfo> getPlayerInfo();
+ bool getShutdownRequested()
+ {
+ return m_shutdown_requested.get();
+ }
+ /*
+ Shall be called with the environment locked.
+ This is accessed by the map, which is inside the environment,
+ so it shouldn't be a problem.
+ */
+ void onMapEditEvent(MapEditEvent *event);
+
+ /*
+ Shall be called with the environment and the connection locked.
+ */
+ Inventory* getInventory(InventoryContext *c, std::string id);
+ void inventoryModified(InventoryContext *c, std::string id);
+
private:
// Virtual methods from con::PeerHandler.
// As of now, these create and remove clients and players.
- // TODO: Make it possible to leave players on server.
void peerAdded(con::Peer *peer);
void deletingPeer(con::Peer *peer, bool timeout);
+ /*
+ Static send methods
+ */
+
+ static void SendHP(con::Connection &con, u16 peer_id, u8 hp);
+
+ /*
+ Non-static send methods
+ */
+
// Envlock and conlock should be locked when calling these
void SendObjectData(float dtime);
void SendPlayerInfos();
void SendInventory(u16 peer_id);
+ void SendChatMessage(u16 peer_id, const std::wstring &message);
+ void BroadcastChatMessage(const std::wstring &message);
+ void SendPlayerHP(Player *player);
+ void SendMovePlayer(Player *player);
+ /*
+ Send a node removal/addition event to all clients except ignore_id.
+ Additionally, if far_players!=NULL, players further away than
+ far_d_nodes are ignored and their peer_ids are added to far_players
+ */
+ void sendRemoveNode(v3s16 p, u16 ignore_id=0,
+ core::list<u16> *far_players=NULL, float far_d_nodes=100);
+ void sendAddNode(v3s16 p, MapNode n, u16 ignore_id=0,
+ core::list<u16> *far_players=NULL, float far_d_nodes=100);
+
+ // Environment and Connection must be locked when called
+ void SendBlockNoLock(u16 peer_id, MapBlock *block, u8 ver);
+
// Sends blocks to clients
void SendBlocks(float dtime);
+
+ /*
+ Something random
+ */
+
+ void UpdateCrafting(u16 peer_id);
// When called, connection mutex should be locked
RemoteClient* getClient(u16 peer_id);
+ // Connection must be locked when called
+ std::wstring getStatusString();
+
+ /*
+ Get a player from memory or creates one.
+ If player is already connected, return NULL
+
+ Call with env and con locked.
+ */
+ Player *emergePlayer(const char *name, const char *password,
+ u16 peer_id);
+
+ /*
+ Update water pressure.
+ This also adds suitable nodes to active_nodes.
+
+ environment has to be locked when calling.
+ */
+ /*void UpdateBlockWaterPressure(MapBlock *block,
+ core::map<v3s16, MapBlock*> &modified_blocks);*/
+
+ // Locks environment and connection by its own
+ struct PeerChange;
+ void handlePeerChange(PeerChange &c);
+ void handlePeerChanges();
+
+ //float m_flowwater_timer;
+ float m_liquid_transform_timer;
+ float m_print_info_timer;
+ float m_objectdata_timer;
+ float m_emergethread_trigger_timer;
+ float m_savemap_timer;
+
// NOTE: If connection and environment are both to be locked,
// environment shall be locked first.
-
JMutex m_env_mutex;
- Environment m_env;
+ ServerEnvironment m_env;
JMutex m_con_mutex;
con::Connection m_con;
BlockEmergeQueue m_emerge_queue;
- // Settings
- bool m_creative_mode;
- float m_objectdata_interval;
- u16 m_active_object_range;
+ // Nodes that are destinations of flowing liquid at the moment
+ //core::map<v3s16, u8> m_flow_active_nodes;
+
+ // 0-23999
+ MutexedVariable<u32> m_time_of_day;
+ // Used to buffer dtime for adding to m_time_of_day
+ float m_time_counter;
+ float m_time_of_day_send_timer;
+
+ MutexedVariable<double> m_uptime;
+
+ enum PeerChangeType
+ {
+ PEER_ADDED,
+ PEER_REMOVED
+ };
+
+ struct PeerChange
+ {
+ PeerChangeType type;
+ u16 peer_id;
+ bool timeout;
+ };
+
+ Queue<PeerChange> m_peer_change_queue;
+
+ std::string m_mapsavedir;
+
+ MutexedVariable<bool> m_shutdown_requested;
+
+ /*
+ Queue of map edits from the environment for sending to the clients
+ This is behind m_env_mutex
+ */
+ Queue<MapEditEvent*> m_unsent_map_edit_queue;
+ /*
+ Set to true when the server itself is modifying the map and does
+ all sending of information by itself.
+ This is behind m_env_mutex
+ */
+ bool m_ignore_map_edit_events;
+ /*
+ If set to !=0, the incoming MapEditEvents are modified to have
+		this peer id as the disabled recipient
+ This is behind m_env_mutex
+ */
+ u16 m_ignore_map_edit_events_peer_id;
friend class EmergeThread;
friend class RemoteClient;
};
+/*
+ Runs a simple dedicated server loop.
+
+ Shuts down when run is set to false.
+*/
+void dedicated_server_loop(Server &server, bool &run);
+
#endif